Every blog carries a motto: You can do more than you think.
https://blog.csdn.net/weixin_39190382?type=blog
0. 前言
langgraph 基礎
1. Chatbot實現
# !pip install langchain
# !pip install langgraph
from typing import Annotated
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages


# Define the shared graph State.
class State(TypedDict):
    # Conversation history. The state variable `messages` is a list whose
    # update rule is `add_messages`: a built-in reducer that APPENDS newly
    # returned messages to the existing list instead of replacing it.
    messages: Annotated[list, add_messages]


# Create the Graph builder with the state schema above.
graph_builder = StateGraph(State)
from langchain.chat_models import init_chat_model

# llm = init_chat_model("gpt-4o", model_provider="openai")
# NOTE(review): the provider API key is presumably read from the
# environment (DEEPSEEK_API_KEY) — confirm before running.
llm = init_chat_model("deepseek-chat", model_provider="deepseek")

# Define an execution node below.
def chatbot(state: State):
    """Single LLM node: input is the State, output is the model's reply.

    The returned dict triggers the `add_messages` reducer, so the reply is
    appended to the conversation history rather than replacing it.
    """
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}


# Wire the node into a linear pipeline: START -> chatbot -> END.
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)
graph = graph_builder.compile()
Adding a node to a graph that has already been compiled. This will not be reflected in the compiled graph.---------------------------------------------------------------------------ValueError Traceback (most recent call last)Cell In[9], line 127 def chatbot(state: State):8 # 調用大模型,并返回消息(列表)9 # 返回值會觸發狀態更新 add_messages10 return {"messages": [llm.invoke(state["messages"])]}
---> 12 graph_builder.add_node("chatbot", chatbot)13 graph_builder.add_edge(START, "chatbot")14 graph_builder.add_edge("chatbot", END)File c:\Users\13010\miniconda3\envs\py12\Lib\site-packages\langgraph\graph\state.py:456, in StateGraph.add_node(self, node, action, defer, metadata, input_schema, retry_policy, cache_policy, destinations, **kwargs)454 raise RuntimeError455 if node in self.nodes:
--> 456 raise ValueError(f"Node `{node}` already present.")457 if node == END or node == START:458 raise ValueError(f"Node `{node}` is reserved.")ValueError: Node `chatbot` already present.
from IPython.display import Image, display

# Visualize the workflow as a Mermaid diagram. Rendering needs a working
# renderer, so this is best-effort: any failure is just printed.
try:
    png_bytes = graph.get_graph().draw_mermaid_png()
    display(Image(data=png_bytes))
except Exception as exc:
    print(exc)
from langchain.schema import AIMessage


def stream_graph_updates(user_input: str):
    """Send one user message into the graph and print each AI reply.

    `graph.stream(...)` yields one event per executed node; each event maps
    the node name to that node's state update (merged via `add_messages`).
    """
    for event in graph.stream({"messages": [{"role": "user", "content": user_input}]}):
        for value in event.values():
            # Only print updates that actually carry an AI message.
            if "messages" in value and isinstance(value["messages"][-1], AIMessage):
                print("Assistant:", value["messages"][-1].content)


def run():
    """Console REPL: loop until an empty line, Ctrl-D, or Ctrl-C."""
    while True:
        # BUG FIX: input() raises EOFError when stdin closes (and
        # KeyboardInterrupt on Ctrl-C); exit the loop cleanly instead of
        # crashing with a traceback.
        try:
            user_input = input("User: ")
        except (EOFError, KeyboardInterrupt):
            break
        if user_input.strip() == "":
            break
        stream_graph_updates(user_input)


run()
Assistant: 你好!😊 很高興見到你~有什么我可以幫你的嗎?
Assistant: 我是DeepSeek Chat,由深度求索公司(DeepSeek)研發的智能AI助手!🤖? 我的使命是幫助你解答問題、提供信息、陪你聊天,甚至幫你處理各種文本和文件。無論是學習、工作,還是日常生活中的疑問,都可以來問我!😊 有什么我可以幫你的嗎?
2. RAG
# !pip install -U langchain-community pymupdf
# !pip install dashscope
# !pip install faiss-cpu
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyMuPDFLoader

# Load the PDF document (one Document per page).
loader = PyMuPDFLoader("./data/deepseek-v3-1-4.pdf")
pages = loader.load_and_split()

# Split the text into overlapping chunks suitable for embedding.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=200,
    length_function=len,
    add_start_index=True,
)
texts = text_splitter.create_documents(
    # Only the first two pages are indexed here.
    [page.page_content for page in pages[:2]]
)

# Embed the chunks and load them into an in-memory FAISS vector store.
# NOTE(review): DashScope presumably reads its API key from the
# environment (DASHSCOPE_API_KEY) — confirm.
embeddings = DashScopeEmbeddings(model="text-embedding-v1")
db = FAISS.from_documents(texts, embeddings)

# Retriever returning the top-5 most similar chunks.
retriever = db.as_retriever(search_kwargs={"k": 5})
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate

# Prompt template: the single {query} slot is filled with the retrieved
# context (see the `retrieval` node).
template = """請根據對話歷史和下面提供的信息回答上面用戶提出的問題:
{query}
"""
prompt = ChatPromptTemplate.from_messages(
    [
        HumanMessagePromptTemplate.from_template(template),
    ]
)
def retrieval(state: State):
    """Retrieve context for the latest user message and append it as a prompt.

    Returns a state update whose messages are merged by `add_messages`.
    """
    # Nothing to retrieve for when there is no message yet.
    if not state["messages"]:
        return {"messages": []}

    # Latest message in the conversation history.
    last_message = state["messages"][-1]
    # BUG FIX: str(message) on a LangChain message object yields the full
    # model repr ("content='...' additional_kwargs={} ..."), which pollutes
    # the similarity-search query. Use the message text itself.
    user_query = getattr(last_message, "content", str(last_message))

    # Top-k similarity search against the FAISS index.
    docs = retriever.invoke(user_query)

    # Fill the {query} slot of the prompt template with the retrieved chunks.
    context = "\n".join(doc.page_content for doc in docs)
    messages = prompt.invoke(context).messages
    return {"messages": messages}
# Build the RAG workflow: START -> retrieval -> chatbot -> END.
graph_builder = StateGraph(State)
graph_builder.add_node("retrieval", retrieval)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "retrieval")
graph_builder.add_edge("retrieval", "chatbot")
graph_builder.add_edge("chatbot", END)
graph = graph_builder.compile()
from IPython.display import Image, display

# Visualize this workflow (best effort; failures are just printed).
try:
    display(Image(data=graph.get_graph().draw_mermaid_png()))
except Exception as e:
    print(e)

# Start the console loop against the RAG graph.
run()
Assistant: DeepSeek-V3 是一個大型混合專家模型(Mixture-of-Experts, MoE),總參數量為 **6710 億(671B)**,其中每個 token 激活的參數量為 **370 億(37B)**。
3. 加入分支:若找不到答案轉人工
from langchain.schema import HumanMessage
from typing import Literal
from langgraph.types import interrupt, Command


def verify(state: State) -> Literal["chatbot", "ask_human"]:
    """Router: 'chatbot' if the retrieved context can answer, else 'ask_human'.

    Asks the LLM to judge the context with a single Y/N answer; the returned
    node name drives `add_conditional_edges`.
    """
    message = HumanMessage("請根據對話歷史和上面提供的信息判斷,已知的信息是否能夠回答用戶的問題。直接輸出你的判斷'Y'或'N'")
    ret = llm.invoke(state["messages"] + [message])
    # BUG FIX: the original substring test `'Y' in ret.content` fires on a
    # 'Y' anywhere in a verbose reply and misses lowercase answers.
    # Normalize and check only the leading character of the verdict.
    verdict = ret.content.strip().lstrip("'\"").upper()
    return "chatbot" if verdict.startswith("Y") else "ask_human"
def ask_human(state: State):user_query = state["messages"][-2].contenthuman_response = interrupt({"question": user_query})# Update the state with the human's input or route the graph based on the input.return {"messages": [AIMessage(human_response)]}
from langgraph.checkpoint.memory import MemorySaver

# Checkpointer for persisting state (in-memory here for the demo).
# In production a durable store such as Redis would be used instead.
memory = MemorySaver()

graph_builder = StateGraph(State)
graph_builder.add_node("retrieval", retrieval)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("ask_human", ask_human)
graph_builder.add_edge(START, "retrieval")
# `verify` returns the name of the next node ("chatbot" or "ask_human").
graph_builder.add_conditional_edges("retrieval", verify)
graph_builder.add_edge("ask_human", END)
graph_builder.add_edge("chatbot", END)

# The human hand-off interrupts execution mid-run, so a checkpointer is
# required to store state until the run is resumed.
graph = graph_builder.compile(checkpointer=memory)
from langchain.schema import AIMessage

# With a checkpointer, a thread_id selects which stored state to read/write —
# comparable to an OpenAI Assistants thread, or a key in Redis.
thread_config = {"configurable": {"thread_id": "my_thread_id"}}


def stream_graph_updates(user_input: str):
    # Feed one user message into the graph (triggers add_messages).
    # Returns the pending question if the run was interrupted, else None.
    for event in graph.stream(
        {"messages": [{"role": "user", "content": user_input}]},
        thread_config,
    ):
        for value in event.values():
            if isinstance(value, tuple):
                # An interrupt surfaces as a tuple of Interrupt objects;
                # its payload carries the question for the human operator.
                return value[0].value["question"]
            elif "messages" in value and isinstance(value["messages"][-1], AIMessage):
                print("Assistant:", value["messages"][-1].content)
                return None
    return None


def resume_graph_updates(human_input: str):
    # Resume the interrupted run with the human's answer.
    for event in graph.stream(
        Command(resume=human_input), thread_config, stream_mode="updates"
    ):
        for value in event.values():
            if "messages" in value and isinstance(value["messages"][-1], AIMessage):
                print("Assistant:", value["messages"][-1].content)


def run():
    # Console loop: empty input exits; interrupts are delegated to a human.
    while True:
        user_input = input("User: ")
        if user_input.strip() == "":
            break
        question = stream_graph_updates(user_input)
        if question:
            human_answer = input("Ask Human: " + question + "\nHuman: ")
            resume_graph_updates(human_answer)
from IPython.display import Image, display

# Visualize this workflow (best effort; failures are just printed).
try:
    display(Image(data=graph.get_graph().draw_mermaid_png()))
except Exception as e:
    print(e)

# Start the console loop against the human-in-the-loop graph.
run()
Assistant: DeepSeek-V3 是一個大型混合專家模型(MoE),總參數量為 **6710億(671B)**,其中每個 token 激活的參數量為 **370億(37B)**。 (根據論文 arXiv:2412.19437v2 提供的信息,DeepSeek-V3 的架構明確標注為 671B 參數規模。)
Assistant: 090
LangGraph 還支持:
- 工具調用
- 并行處理
- 狀態持久化
- 對話歷史管理
- 歷史動作回放(用于調試與測試)
- 子圖管理
- 多智能體協作
- …
更多關于 LangGraph 的 HowTo,參考官方文檔:https://langchain-ai.github.io/langgraph/how-tos