概念:動態路徑選擇器
- 優點:靈活處理不同類型輸入
- 缺點:路由邏輯復雜度高
# Standard library.
import os

# Typing helpers for the graph state and the router's literal choices.
from typing import TypedDict

from typing_extensions import Literal

# LangChain / LangGraph components.
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
# Initialize the chat model. The API key is read from the environment so it
# never appears in source; a missing GPT_API_KEY raises KeyError up front.
llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    openai_api_key=os.environ["GPT_API_KEY"],
    openai_api_base="https://api.chatanywhere.tech/v1",
    streaming=False,  # disable streaming — the graph consumes whole responses
)


class Route(BaseModel):
    """Structured-output schema for the router: the chosen writing style."""

    # Constrained to the three supported styles; the LLM must pick exactly one.
    step: Literal["議論文", "記敘文", "散文"] = Field(
        description="寫作風格類型,必須是以下之一:議論文、記敘文、散文"
    )


class State(TypedDict):
    """Shared graph state passed between nodes."""

    input: str     # the user's topic
    decision: str  # style chosen by the router node
    output: str    # generated article text
# Prompt prefix for each supported writing style. Keys must match the
# Literal values declared on Route.step.
PROMPT_TEMPLATES = {
    "議論文": "請以嚴謹的邏輯結構闡述觀點,包含明確的論點、論據和結論。",
    "記敘文": "請用生動形象的語言講述一個完整的故事,包含時間、地點、人物和事件發展。",
    "散文": "請以抒情或敘議結合的方式表達主題思想,注重語言美感和意境營造。",
}


def _generate(style: str, state: State) -> dict:
    """Build the style-specific prompt, call the LLM, and return the state update."""
    prompt = f"{PROMPT_TEMPLATES[style]} \n 主題如下:{state['input']}"
    result = llm.invoke(prompt)
    return {'output': result.content}


def llm_call_1(state: State):
    """Worker node: write an argumentative essay (議論文)."""
    return _generate('議論文', state)


def llm_call_2(state: State):
    """Worker node: write a narrative (記敘文)."""
    return _generate('記敘文', state)


def llm_call_3(state: State):
    """Worker node: write lyrical prose (散文)."""
    return _generate('散文', state)


def llm_call_router(state: State):
    """Classify the input topic into one of the three writing styles.

    Uses structured output constrained to the Route schema.
    method="function_calling" is passed explicitly because some providers
    reject the default structured-output mode with a 400 error.
    """
    router = llm.with_structured_output(Route, method="function_calling")
    print(f"state:{state}")
    decision = router.invoke([
        SystemMessage(content="你是一個寫作風格分類器。請根據輸入的主題判斷最適合的寫作風格。"),
        HumanMessage(content=state['input']),
    ])
    print(f"decision:{decision}")
    return {'decision': decision.step}
def router_decision(state: State):if state['decision'] == '議論文':return "llm_call_1"elif state['decision'] == '記敘文':return "llm_call_2"elif state['decision'] == '散文':return "llm_call_3"workflow = StateGraph(State)# 添加節點
# Register the three worker nodes and the router node.
workflow.add_node("llm_call_1", llm_call_1)
workflow.add_node("llm_call_2", llm_call_2)
workflow.add_node("llm_call_3", llm_call_3)
workflow.add_node("llm_call_router", llm_call_router)

# Wire the graph: START -> router -> (exactly one style node) -> END.
workflow.add_edge(START, "llm_call_router")
workflow.add_conditional_edges(
    "llm_call_router",
    router_decision,
    {
        "llm_call_1": "llm_call_1",
        "llm_call_2": "llm_call_2",
        "llm_call_3": "llm_call_3",
    },
)
workflow.add_edge("llm_call_1", END)
workflow.add_edge("llm_call_2", END)
workflow.add_edge("llm_call_3", END)

graph = workflow.compile()

if __name__ == "__main__":
    # Demo run: route the topic to a style, then generate the article.
    response = graph.invoke({"input": "天邊的云"})
    print(f"選擇的文體為:{response['decision']}")
    print(f"文章:{response['output']}")
執行結果
常見問題
遇到的問題如下:
結構化輸出這里太難用了 每次都報結構化輸出失敗。。。
router = llm.with_structured_output(Route)
openai.BadRequestError: Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': '<400> InternalError.Algo.InvalidParameter: The tool call is not supported.', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-a711b580-58af-9286-bad1-ddc36b8a44d2', 'request_id': 'a711b580-58af-9286-bad1-ddc36b8a44d2'}
During task with name ‘llm_call_router’ and id ‘3437df04-e2bc-aac5-f29b-c3417070c369’
原因:
with_structured_output方法對很多大模型沒有適配,原本用的deepseek一直報錯,換成chatgpt之后就沒問題了