'''
State: a shared data structure representing the current snapshot of the
application. It can be any Python type, but is typically a TypedDict or a
Pydantic BaseModel.

Nodes: Python functions that encode the agent's logic. They take the current
state as input, perform some computation or side effect, and return the
updated state.

Edges: Python functions that determine, based on the current state, which
node to execute next. They can be conditional branches or fixed transitions.

Nodes and edges are just Python functions — they may contain an LLM call or
plain Python code. Nodes do the work; edges decide what happens next.
'''
## Build the retrieval tool: fetch blog pages, split them into chunks,
## index the chunks in an in-memory vector store, and expose the retriever
## as a LangChain tool the agent can call.
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.tools.retriever import create_retriever_tool

# Pages fetched for RAG; WebBaseLoader scrapes each URL's content.
urls = ["https://blog.csdn.net/xnuscd/article/details/143474722"]
docs = [WebBaseLoader(url).load() for url in urls]

# Flatten the per-URL document lists into one list of documents.
docs_list = [doc for url_docs in docs for doc in url_docs]

# Chunk the documents; sizes are measured in tiktoken tokens.
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=100, chunk_overlap=50
)
doc_splits = text_splitter.split_documents(docs_list)

# In-memory vector store over the chunks, embedded with OpenAI embeddings.
vectorstore = InMemoryVectorStore.from_documents(
    documents=doc_splits, embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

# NOTE(review): the description says "Lilian Weng blog posts" but the indexed
# URL is a CSDN article — confirm the description matches the indexed content.
retriever_tool = create_retriever_tool(
    retriever,
    "retrieve_blog_posts",
    "Search and return information about Lilian Weng blog posts.",
)
## Bind the retrieval tool to the chat model and define the entry node.
from langgraph.graph import MessagesState
from langchain.chat_models import init_chat_model

# Shared chat model; temperature=0 for deterministic routing and answers.
# (This assignment was commented out in the original, yet response_model is
# used by this node and by rewrite_question/generate_answer — defining it
# here fixes the resulting NameError.)
response_model = init_chat_model("openai:gpt-4.1", temperature=0)


def generate_query_or_respond(state: MessagesState):
    """Entry node: let the LLM answer directly or emit a retrieval tool call.

    The model is bound to ``retriever_tool``; given the conversation so far
    (``state["messages"]``) it returns either a plain reply or an AI message
    carrying a ``retrieve_blog_posts`` tool call.
    """
    response = (
        response_model
        .bind_tools([retriever_tool])
        .invoke(state["messages"])
    )
    # MessagesState appends returned messages to the running conversation.
    return {"messages": [response]}


# Example invocation (kept for reference):
# input = {
#     "messages": [
#         {
#             "role": "user",
#             "content": "What does Lilian Weng say about types of reward hacking?",
#         }
#     ]
# }
# generate_query_or_respond(input)["messages"][-1].pretty_print()
# ================================== Ai Message ==================================
# Tool Calls:
#   retrieve_blog_posts (call_tYQxgfIlnQUDMdtAhdbXNwIM)
#   Call ID: call_tYQxgfIlnQUDMdtAhdbXNwIM
#   Args:
#     query: types of reward hacking
## Add a conditional edge — grade_documents —
# to decide whether the retrieved documents are relevant to the question
from pydantic import BaseModel, Field
from typing import Literal

# Prompt for the relevance grader; formatted with the retrieved context and
# the user question. (Renamed from Grade_Prompt to the conventional
# UPPER_SNAKE_CASE for a module-level constant.)
GRADE_PROMPT = (
    "You are a grader assessing relevance of a retrieved document to a user question. \n "
    "Here is the retrieved document: \n\n {context} \n\n"
    "Here is the user question: {question} \n"
    "If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n"
    "Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question."
)


class GradeDocuments(BaseModel):
    """Structured relevance verdict: 'yes' or 'no'."""

    binary_score: str = Field(
        description="Relevance score: 'yes' if relevant, or 'no' if not relevant"
    )


# Deterministic model instance dedicated to grading.
grader_model = init_chat_model("openai:gpt-4.1", temperature=0)


def grade_documents(
    state: MessagesState,
) -> Literal["generate_answer", "rewrite_question"]:
    """Conditional edge: route based on whether retrieved context is relevant.

    Reads the original user question (first message) and the retrieved
    context (last message, i.e. the tool output), asks the grader model for
    a structured yes/no verdict, and returns the name of the next node.
    """
    question = state["messages"][0].content
    context = state["messages"][-1].content
    prompt = GRADE_PROMPT.format(question=question, context=context)
    response = (
        grader_model
        .with_structured_output(GradeDocuments)  # Pydantic structured output
        .invoke([{"role": "user", "content": prompt}])
    )
    # The verdict decides which node runs next.
    if response.binary_score == "yes":
        return "generate_answer"
    return "rewrite_question"
## Build the rewrite_question node and the generate_answer node
# When grade_documents returns "rewrite_question" the question is rephrased;
# when it returns "generate_answer" the answer is produced from the context.
# (The original left this note un-commented, which is a syntax error.)

REWRITE_PROMPT = (
    "Look at the input and try to reason about the underlying semantic intent / meaning.\n"
    "Here is the initial question:"
    "\n ------- \n"
    "{question}"
    "\n ------- \n"
    "Formulate an improved question:"
)


def rewrite_question(state: MessagesState):
    """Rewrite the original user question.

    Runs when the grader found the retrieved documents irrelevant: ask the
    model to restate the question, then feed the restatement back into the
    conversation as a new user message.
    """
    messages = state["messages"]
    question = messages[0].content
    prompt = REWRITE_PROMPT.format(question=question)
    response = response_model.invoke([{"role": "user", "content": prompt}])
    return {"messages": [{"role": "user", "content": response.content}]}


GENERATE_PROMPT = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer the question. "
    "If you don't know the answer, just say that you don't know. "
    "Use three sentences maximum and keep the answer concise.\n"
    "Question: {question} \n"
    "Context: {context}"
)


def generate_answer(state: MessagesState):
    """Generate an answer.

    Runs when the grader judged the retrieved context relevant: answer the
    original question (first message) from the retrieved context (last
    message, i.e. the tool output).
    """
    question = state["messages"][0].content
    context = state["messages"][-1].content
    prompt = GENERATE_PROMPT.format(question=question, context=context)
    response = response_model.invoke([{"role": "user", "content": prompt}])
    return {"messages": [response]}
## Assemble the graph.
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode, tools_condition

# State graph keyed on MessagesState (a running list of chat messages).
workflow = StateGraph(MessagesState)

# Nodes — names given explicitly (equivalent to name inference from __name__).
workflow.add_node("generate_query_or_respond", generate_query_or_respond)
workflow.add_node("retrieve", ToolNode([retriever_tool]))  # tool-execution node
workflow.add_node("rewrite_question", rewrite_question)
workflow.add_node("generate_answer", generate_answer)

# Edges.
workflow.add_edge(START, "generate_query_or_respond")

# After the entry node: if the model emitted a tool call, go retrieve;
# otherwise the model answered directly, so the run ends.
workflow.add_conditional_edges(
    "generate_query_or_respond",
    tools_condition,  # routing function: returns "tools" or END
    {
        "tools": "retrieve",
        END: END,
    },
)

# After retrieval: grade_documents itself returns the next node's name
# ("generate_answer" or "rewrite_question"), so no mapping dict is needed.
workflow.add_conditional_edges(
    "retrieve",
    grade_documents,
)

workflow.add_edge("generate_answer", END)
workflow.add_edge("rewrite_question", "generate_query_or_respond")

# Compile into a runnable graph.
graph = workflow.compile()
# Render the compiled graph as a Mermaid diagram (notebook display only).
from IPython.display import Image, display

png_bytes = graph.get_graph().draw_mermaid_png()
display(Image(png_bytes))