LangGraph是一个基于图计算的大语言模型应用开发框架,它通过将复杂任务分解为多个节点并构建执行图来实现高效的LLM应用开发。以下代码提供了LangGraph的环境搭建和基础、高级应用示例,主要包括:
- 环境搭建脚本:自动化安装系统依赖、创建虚拟环境并安装LangGraph及其依赖库
- 基础使用示例:展示如何构建一个简单的问答系统,包含用户输入、网络搜索和答案生成三个节点
- 高级应用示例:实现了一个完整的文档问答系统,包括文档加载、文本分割、向量索引和检索式问答等功能
使用前请替换示例中的OpenAI API密钥,并根据实际需求调整节点和图谱结构。
# LangGraph高级应用示例:文档问答系统
from langgraph import Graph, Node
from langgraph.llms import OpenAILLM
from langgraph.memory import VectorDBMemory
from langgraph.tools import DocumentLoader, TextSplitter, EmbeddingGenerator
from langgraph.pipelines import RetrievalQA
# Initialize the shared components used by the nodes below: the LLM client,
# a sentence-transformer embedder, and the vector store it feeds.
llm = OpenAILLM(api_key="your_openai_api_key")  # NOTE(review): placeholder — supply a real key (prefer an env var) before running
embedding = EmbeddingGenerator(model_name="sentence-transformers/all-MiniLM-L6-v2")
memory = VectorDBMemory(embedding=embedding)
# 定义文档处理节点
@Node()
def load_documents(file_path: str) -> list:
    """Load the document(s) at *file_path* via the framework's DocumentLoader."""
    return DocumentLoader().load(file_path)
@Node()
def split_text(documents: list) -> list:
    """Split *documents* into overlapping chunks (1000 chars, 100-char overlap)."""
    chunker = TextSplitter(chunk_size=1000, chunk_overlap=100)
    return chunker.split(documents)
@Node()
def index_documents(texts: list) -> str:
    """Add every text chunk to the shared vector memory.

    Fix: the original annotation said ``-> None`` but the function has always
    returned a status string; the annotation is corrected to ``str``.

    Args:
        texts: text chunks produced by ``split_text``.

    Returns:
        A (Chinese) status message confirming the chunks were indexed.
    """
    for text in texts:
        memory.add(text)  # relies on the module-level VectorDBMemory instance
    return "文档已索引"
# 定义问答节点
@Node()
def retrieve_context(query: str) -> list:
    """Return the chunks in vector memory most similar to *query* (top 3)."""
    top_k = 3
    return memory.search(query, k=top_k)
@Node()
def generate_answer(query: str, context: list) -> str:
    """Ask the LLM to answer *query* grounded in the retrieved *context* chunks."""
    context_text = "\n\n".join(context)
    # Same prompt text as before, assembled from explicit line pieces.
    prompt = (
        "基于以下上下文回答问题:\n"
        f"问题:{query}\n"
        f"上下文:{context_text}\n"
    )
    return llm.generate(prompt)
# Build the execution graph and wire the nodes together.
graph = Graph()
# Document-ingestion pipeline: load -> split -> index.
graph.add_node(load_documents)
graph.add_node(split_text)
graph.add_node(index_documents)
graph.connect(load_documents, split_text)
graph.connect(split_text, index_documents)
# Question-answering pipeline: retrieve -> answer.
graph.add_node(retrieve_context)
graph.add_node(generate_answer)
graph.connect(retrieve_context, generate_answer)
# Run the ingestion pipeline once to populate the vector store.
documents = graph.run(load_documents("path/to/your/document.pdf"))  # NOTE(review): replace with a real document path
texts = graph.run(split_text(documents))
graph.run(index_documents(texts))
# Then answer a query against the freshly indexed documents.
query = "LangGraph的主要应用场景有哪些?"
context = graph.run(retrieve_context(query))
answer = graph.run(generate_answer(query, context))
print(f"问题:{query}")
print(f"答案:{answer}")
# LangGraph基础使用示例
from langgraph import Graph, Node
from langgraph.llms import OpenAILLM
from langgraph.memory import InMemoryKnowledgeGraph
from langgraph.tools import WebSearchTool
# Initialize the LLM client for the basic example.
llm = OpenAILLM(api_key="your_openai_api_key")  # NOTE(review): placeholder — supply a real key before running
# In-memory knowledge graph used as this example's memory backend.
memory = InMemoryKnowledgeGraph()
# 定义节点
@Node()
def user_input(query: str) -> str:
    """Identity node: feed the raw user query into the graph unchanged."""
    return query
@Node()
def search_web(query: str) -> str:
    """Run *query* through the web-search tool and return the raw results."""
    return WebSearchTool().run(query)
@Node()
def generate_answer(query: str, search_results: str) -> str:
    """Have the LLM answer *query*, grounded in the given search results."""
    # Same prompt text as before, built in two explicit pieces.
    grounding = f"搜索结果:{search_results}"
    prompt = f"基于以下搜索结果回答问题:{query}\n\n" + grounding
    return llm.generate(prompt)
# Build the graph and register the three nodes.
graph = Graph()
graph.add_node(user_input)
graph.add_node(search_web)
graph.add_node(generate_answer)
# Wire the flow: input -> web search -> answer, plus a direct
# input -> answer edge labelled "direct_query".
graph.connect(user_input, search_web)
graph.connect(search_web, generate_answer)
graph.connect(user_input, generate_answer, edge_label="direct_query")
# Execute the graph starting from the user-input node.
result = graph.run(user_input("什么是LangGraph?"))
print(result)
安装所需依赖(以下为环境搭建脚本)
#!/usr/bin/env bash
# LangGraph environment setup script.
# Fix: added a shebang and `set -e` so the script aborts on the first failed
# step (e.g. apt-get or venv creation) instead of silently continuing and
# producing a broken environment.
set -e

# Install the build toolchain and Python prerequisites.
echo "正在检查并安装系统依赖..."
sudo apt-get update
sudo apt-get install -y build-essential python3-dev python3-pip git curl

# Create and activate an isolated virtual environment.
echo "正在创建Python虚拟环境..."
python3 -m venv langgraph_env
source langgraph_env/bin/activate
echo "虚拟环境已激活"

# Install PyTorch (CPU wheels; change the index URL for a CUDA build).
echo "正在安装PyTorch..."
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu

# Install LangGraph and its companion libraries.
echo "正在安装LangGraph及其依赖..."
pip install langgraph transformers sentence-transformers faiss-cpu

# Smoke-test the install by importing the package and printing its version.
echo "验证安装..."
python -c "import langgraph; print(f'LangGraph版本: {langgraph.__version__}')"
echo "LangGraph环境搭建完成!"
echo "使用 'source langgraph_env/bin/activate' 激活环境"