Create a simple workflow: Start ——> Node 1 (fixed input and output) ——> End
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict
from typing import Annotated

# A fixed chat history used as the initial state
history_messages = [
    SystemMessage(content="You are a chat assistant"),
    HumanMessage(content="Hello, can you introduce yourself?"),
    AIMessage(content="I am a small-model chat assistant"),
]
messages = history_messages + [HumanMessage(content="What's the weather like today?")]

class MyState(TypedDict):
    # No reducer is attached here, so each node update overwrites the channel
    messages: Annotated[list, ""]

def node1(state: MyState):
    # Fixed output, independent of the input state
    return {"messages": AIMessage(content="The weather is nice today")}

graph_builder = StateGraph(MyState)
graph_builder.add_node("node1", node1)
graph_builder.add_edge(START, "node1")
graph_builder.add_edge("node1", END)
graph = graph_builder.compile()
response = graph.stream({"messages": messages})
for event in response:
    print(event)
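If you only want the final state rather than one event per node, the same graph can also be run with invoke; a minimal sketch, using the same messages list as above:

# invoke runs the graph to completion and returns the final state.
final_state = graph.invoke({"messages": messages})
# Because no reducer is attached to "messages", node1's update overwrote the
# whole channel, so this prints only the single AIMessage it returned.
print(final_state["messages"])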
Create a simple workflow: Start ——> Node 1 (fixed input and output) ——> Node 2 (fixed input and output) ——> End
This time each node's output is appended to the message list in MyState and passed on to the next node (see the short demonstration of add_messages after the code).
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict
from typing import Annotated
from langgraph.graph.message import add_messages

# A fixed chat history used as the initial state
history_messages = [
    SystemMessage(content="You are a chat assistant"),
    HumanMessage(content="Hello, can you introduce yourself?"),
    AIMessage(content="I am a small-model chat assistant"),
]
messages = history_messages + [HumanMessage(content="What's the weather like today?")]

class MyState(TypedDict):
    # add_messages is a reducer: node updates are appended instead of overwriting
    messages: Annotated[list, add_messages]

def node1(state: MyState):
    return {"messages": AIMessage(content="The weather is nice today")}

def node2(state: MyState):
    return {"messages": AIMessage(content="Tomorrow's weather will also be nice")}

graph_builder = StateGraph(MyState)
graph_builder.add_node("node1", node1)
graph_builder.add_node("node2", node2)
graph_builder.add_edge("node1", "node2")
graph_builder.add_edge(START, "node1")
graph_builder.add_edge("node2", END)
graph = graph_builder.compile()
response = graph.stream({"messages": messages})
for event in response:
    print(event)
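To see what the reducer does on its own, add_messages can also be called directly outside a graph; a small sketch with illustrative message contents:

from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph.message import add_messages

# add_messages merges the update into the existing list: new messages are
# appended, and a message with a matching id would replace the old one.
existing = [HumanMessage(content="What's the weather like today?")]
update = [AIMessage(content="The weather is nice today")]
merged = add_messages(existing, update)
print(len(merged))  # 2: the original human message plus the appended AI reply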
Create a simple conversation workflow: Start ——> Chat node 1 ——> End
from langchain.chat_models import init_chat_model
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict
from typing import Annotated
from langgraph.graph.message import add_messages

class MyState(TypedDict):
    messages: Annotated[list, add_messages]

# A local model served by Ollama; swap in whatever chat model you have available
llm = init_chat_model("deepseek-r1:7b", model_provider="ollama")

def chatbot(state: MyState):
    # Pass the accumulated message history to the LLM and return its reply
    return {"messages": llm.invoke(state["messages"])}

graph_builder = StateGraph(MyState)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)
graph = graph_builder.compile()
response = graph.stream({"messages": "Hello"})
for event in response:
    if "chatbot" in event:
        value = event["chatbot"]
        if "messages" in value and isinstance(value["messages"], AIMessage):
            print(value["messages"].content)
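Because the messages channel uses the add_messages reducer, the model's reply is appended to the history rather than replacing it; a minimal sketch using invoke on the same graph to read the final answer:

# invoke returns the final state; the reply is appended by add_messages,
# so the last element of "messages" is the assistant's answer.
final_state = graph.invoke({"messages": "Hello"})
print(final_state["messages"][-1].content)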
Build a simple RAG pipeline with LangChain:
Two more packages need to be installed:
pip install pypdf
pip install dashscope
(If FAISS is not already available in your environment, faiss-cpu is needed as well: pip install faiss-cpu.)
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load the document
loader = PyPDFLoader("./data/your_pdf_document.pdf")
pages = loader.load_and_split()

# Split the document into chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=200,
    length_function=len,
    add_start_index=True,
)
texts = text_splitter.create_documents(
    [page.page_content for page in pages[:2]],
)

# Embed the chunks and load them into the vector store
embeddings = DashScopeEmbeddings(
    model="text-embedding-v1",
    dashscope_api_key="your Alibaba Cloud Bailian (DashScope) API key",
)
db = FAISS.from_documents(texts, embeddings)

# Retrieve the top-5 results
retriever = db.as_retriever(search_kwargs={"k": 5})
docs = retriever.invoke("a question about the document")
print(docs)
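To check how relevant the retrieved chunks actually are, the FAISS vector store also exposes similarity_search_with_score; a small sketch against the db built above (the query string is a placeholder):

# Each result is a (Document, score) pair; FAISS returns a distance by default,
# so smaller scores mean closer matches.
for doc, score in db.similarity_search_with_score("a question about the document", k=5):
    print(round(score, 3), doc.page_content[:80])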
Place the retriever as a node in front of the chat node in the conversation workflow: Start ——> RAG retrieval node ——> Chat node 1 ——> End
from langchain.chat_models import init_chat_model
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langgraph.graph import StateGraph, MessagesState, START, END
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

# A local model served by Ollama
llm = init_chat_model("deepseek-r1:7b", model_provider="ollama")

def chatbot(state: MessagesState):
    return {"messages": llm.invoke(state["messages"])}

def retriever(state: MessagesState):
    question = state["messages"][-1].content
    messages = "Please refer to the following context: "

    # Load the document (rebuilding the index on every call is for demonstration
    # only; see the sketch after this example for building it once up front)
    loader = PyPDFLoader("./data/your_pdf_document.pdf")
    pages = loader.load_and_split()

    # Split the document into chunks
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=512,
        chunk_overlap=200,
        length_function=len,
        add_start_index=True,
    )
    texts = text_splitter.create_documents(
        [page.page_content for page in pages[:2]],
    )

    # Embed the chunks and load them into the vector store
    embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key="your API key")
    db = FAISS.from_documents(texts, embeddings)

    # Retrieve the top-5 results and build the augmented prompt
    dbRetriever = db.as_retriever(search_kwargs={"k": 5})
    docs = dbRetriever.invoke(question)
    for doc in docs:
        messages = messages + doc.page_content
    messages = messages + " Answer the question: " + question
    return {"messages": HumanMessage(content=messages)}

graph_builder = StateGraph(MessagesState)
graph_builder.add_node("retriever", retriever)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "retriever")
graph_builder.add_edge("retriever", "chatbot")
graph_builder.add_edge("chatbot", END)
graph = graph_builder.compile()
response = graph.stream({"messages": "your question?"})
for event in response:
    if "chatbot" in event:
        value = event["chatbot"]
        if "messages" in value and isinstance(value["messages"], AIMessage):
            print(value["messages"].content)
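The retriever node above reloads the PDF and rebuilds the FAISS index on every invocation, which is only acceptable for a demo. A minimal sketch of one way to avoid that, assuming the same PDF and embedding model, with "faiss_index" as an arbitrary local folder name chosen for illustration: build the index once at startup, persist it with save_local, and let the node only query it.

import os
from langchain_core.messages import HumanMessage
from langgraph.graph import MessagesState
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key="your API key")

if os.path.exists("faiss_index"):
    # allow_dangerous_deserialization is required by recent langchain_community
    # versions when loading a locally pickled index that you created yourself.
    db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
else:
    pages = PyPDFLoader("./data/your_pdf_document.pdf").load_and_split()
    splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=200)
    texts = splitter.create_documents([page.page_content for page in pages[:2]])
    db = FAISS.from_documents(texts, embeddings)
    db.save_local("faiss_index")

dbRetriever = db.as_retriever(search_kwargs={"k": 5})

def retriever(state: MessagesState):
    # The node now only performs the lookup and assembles the augmented prompt.
    question = state["messages"][-1].content
    docs = dbRetriever.invoke(question)
    context = "".join(doc.page_content for doc in docs)
    prompt = "Please refer to the following context: " + context + " Answer the question: " + question
    return {"messages": HumanMessage(content=prompt)}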