AI Agent设计模式三:Routing

发布于:2025-04-06 ⋅ 阅读:(64) ⋅ 点赞:(0)

概念 :动态路径选择器

  • ✅ 优点:灵活处理不同类型输入
  • ❌ 缺点:路由逻辑复杂度高

(图:Routing 模式的结构示意图——路由节点根据输入类型选择不同的处理分支)

from typing import TypedDict
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from typing_extensions import Literal
import os

from langchain_core.pydantic_v1 import BaseModel,Field

# Initialize the chat model used by every node in the graph.
# NOTE(review): reads the API key from the GPT_API_KEY environment variable
# and raises KeyError at import time if it is not set — confirm this is the
# intended failure mode for the demo.
llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    openai_api_key=os.environ["GPT_API_KEY"],
    openai_api_base="https://api.chatanywhere.tech/v1",
    streaming=False  # 禁用流式传输 -> streaming disabled; nodes consume the full reply at once
)

class Route(BaseModel):
    """Structured-output schema for the router LLM call.

    The model must pick exactly one of the three Chinese writing styles:
    议论文 (argumentative), 记叙文 (narrative), 散文 (lyric prose).
    """
    # Literal constrains the model's choice; Field.description is sent to the
    # model as part of the function-calling schema, so it stays in Chinese.
    step: Literal["议论文", "记叙文", "散文"] = Field(
        description="写作风格类型,必须是以下之一:议论文、记叙文、散文"
    )

class State(TypedDict):
    """Shared LangGraph state passed between nodes; each node returns a partial update."""
    input: str     # user-supplied essay topic
    decision: str  # writing style chosen by the router node (one of Route.step's values)
    output: str    # generated essay text, filled in by one of the writer nodes

# Prompt prefix per writing style, keyed by the router's decision value
# (议论文 = argumentative, 记叙文 = narrative, 散文 = lyric prose).
PROMPT_TEMPLATES = {
    "议论文": "请以严谨的逻辑结构阐述观点,包含明确的论点、论据和结论。",
    "记叙文": "请用生动形象的语言讲述一个完整的故事,包含时间、地点、人物和事件发展。",
    "散文": "请以抒情或叙议结合的方式表达主题思想,注重语言美感和意境营造。"
}

def llm_call_1(state: State):
    """Writer node: produce an argumentative essay (议论文) on the topic in state['input']."""
    style_hint = PROMPT_TEMPLATES['议论文']
    prompt = f"{style_hint} \n 主题如下:{state['input']}"
    reply = llm.invoke(prompt)
    return {'output': reply.content}

def llm_call_2(state: State):
    """Writer node: produce a narrative essay (记叙文) on the topic in state['input']."""
    style_hint = PROMPT_TEMPLATES['记叙文']
    prompt = f"{style_hint} \n 主题如下:{state['input']}"
    reply = llm.invoke(prompt)
    return {'output': reply.content}

def llm_call_3(state: State):
    """Writer node: produce a lyric-prose essay (散文) on the topic in state['input']."""
    style_hint = PROMPT_TEMPLATES['散文']
    prompt = f"{style_hint} \n 主题如下:{state['input']}"
    reply = llm.invoke(prompt)
    return {'output': reply.content}

def llm_call_router(state: State):
    """Router node: classify the topic in state['input'] into one writing style.

    Returns a partial state update {'decision': <议论文 | 记叙文 | 散文>}.
    """
    # Wrap the model so its reply is parsed into a Route instance.
    # method="function_calling" is deliberate: some OpenAI-compatible backends
    # reject the default structured-output mechanism with a 400
    # "tool call is not supported" error, while function calling works.
    router = llm.with_structured_output(Route, method="function_calling")

    # Fix: removed leftover debug print() statements that dumped the full
    # state and decision to stdout on every invocation.
    decision = router.invoke(
        [
            SystemMessage(content="你是一个写作风格分类器。请根据输入的主题判断最适合的写作风格。"),
            HumanMessage(content=state['input'])
        ]
    )
    return {'decision': decision.step}

# Conditional-edge selector: maps the router's decision onto a writer node.
def router_decision(state: State):
    """Return the name of the writer node matching state['decision'].

    Raises:
        ValueError: if the decision is not one of the three known styles.
        (The original if/elif chain silently returned None here, which would
        surface later as an opaque LangGraph routing error.)
    """
    routes = {
        '议论文': "llm_call_1",
        '记叙文': "llm_call_2",
        '散文': "llm_call_3",
    }
    decision = state['decision']
    try:
        return routes[decision]
    except KeyError:
        raise ValueError(f"Unknown writing style decision: {decision!r}") from None

# Assemble the routing graph: START -> router -> exactly one writer -> END.
workflow = StateGraph(State)

# Register the three writer nodes and the router node.
workflow.add_node("llm_call_1", llm_call_1)
workflow.add_node("llm_call_2", llm_call_2)
workflow.add_node("llm_call_3", llm_call_3)
workflow.add_node("llm_call_router", llm_call_router)

# Wire the edges: every run enters at the router; router_decision picks
# which writer node executes; each writer terminates the run.
workflow.add_edge(START, "llm_call_router")
workflow.add_conditional_edges("llm_call_router",
                               router_decision,
                               {
                                "llm_call_1": "llm_call_1",
                                "llm_call_2": "llm_call_2",
                                "llm_call_3": "llm_call_3"
                               })
workflow.add_edge("llm_call_1", END)
workflow.add_edge("llm_call_2", END)
workflow.add_edge("llm_call_3", END)

graph = workflow.compile()

# Demo run: route the topic "天边的云" through the graph and print the
# chosen style and the generated essay.  NOTE(review): executes at import
# time and requires network access plus a valid GPT_API_KEY.
response = graph.invoke({"input": "天边的云"})
print(f"选择的文体为:{response['decision']}")
print(f"文章:{response['output']}")

执行结果
(图:程序执行结果截图)

常见问题

遇到的问题如下:

结构化输出这一步不太好用:按默认方式调用时,每次都报结构化输出失败,例如:
router = llm.with_structured_output(Route)

openai.BadRequestError: Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': '<400> InternalError.Algo.InvalidParameter: The tool call is not supported.', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-a711b580-58af-9286-bad1-ddc36b8a44d2', 'request_id': 'a711b580-58af-9286-bad1-ddc36b8a44d2'}
During task with name 'llm_call_router' and id '3437df04-e2bc-aac5-f29b-c3417070c369'

原因:
with_structured_output 方法的默认结构化输出机制对很多大模型没有适配:原本用的 DeepSeek 一直报 "tool call is not supported" 错误;换成 ChatGPT 并显式指定 method="function_calling" 之后就没有问题了。