from langchain_community.document_loaders import PyPDFLoader
from pathlib import Path


def load_local_pdf(file_path):
    """Load a local PDF into a list of per-page Documents via PyPDFLoader.

    Args:
        file_path: Path to the PDF file on the local filesystem.

    Returns:
        The list of Document objects (one per page), or None if PyPDFLoader
        failed to parse the file.

    Raises:
        FileNotFoundError: If ``file_path`` does not exist.
    """
    if not Path(file_path).exists():
        raise FileNotFoundError(f"文件 {file_path} 不存在!")
    loader = PyPDFLoader(file_path)
    try:
        # Keep only the line that can actually fail inside the try block.
        docs = loader.load()
    except Exception as e:
        # Best-effort: report the failure and signal it to the caller with
        # None rather than crashing the whole script here.
        print(f"加載失敗: {str(e)}")
        return None
    print(f"成功加載 {len(docs)} 頁 | 首頁內容片段: {docs[0].page_content[:200]}...")
    return docs
import os

# SECURITY(review): the original hardcoded a live OpenAI API key directly in
# source. Never commit secrets — the leaked key must be revoked, and the key
# should be supplied via the environment (e.g. `export OPENAI_API_KEY=...`).
# Fail fast with a clear message if it is missing.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")
from langchain_openai.chat_models import ChatOpenAI

# 1. Initialize the OpenAI chat model.
# `model_name=` is a deprecated alias in langchain-openai; `model=` is the
# documented parameter and behaves identically.
llm = ChatOpenAI(model="gpt-4o-mini")

# Smoke-test the OpenAI call before building the pipeline.
response = llm.invoke("獎懲的原則是什么?")
print(response.content)
# 2. Load the PDF document.
from langchain_community.document_loaders import PyPDFLoader  # re-import kept from original

# Alternatively, load the paper straight from a URL:
# loader = PyPDFLoader("https://arxiv.org/pdf/2402.03216")
# docs = loader.load()
# print(docs[0].metadata)

local_docs = load_local_pdf("C:\\員工獎懲管理辦法.pdf")
# Guard: load_local_pdf returns None on failure; abort here with a clear
# message instead of crashing later inside the text splitter.
if local_docs is None:
    raise SystemExit("PDF 加載失敗,終止執行")
# 3. Split the documents into overlapping chunks.
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Splitter configuration: 1000-char chunks with a 200-char overlap.
# Separators are ordered from the strongest break (blank line) down to
# "any character" as the last resort; "。" keeps Chinese sentences intact.
_splitter_config = dict(
    chunk_size=1000,
    chunk_overlap=200,
    separators=["\n\n", "。", "\n", " ", ""],
    length_function=len,
    add_start_index=True,  # record each chunk's start offset in its metadata
)
splitter = RecursiveCharacterTextSplitter(**_splitter_config)

# Apply the splitter to the loaded pages.
corpus = splitter.split_documents(local_docs)
print(f"分割后文檔數: {len(corpus)} | 首塊內容示例:\n{corpus[0].page_content[:200]}...")
# 4. Initialize the embedding model from a local checkpoint.
from langchain_huggingface.embeddings import HuggingFaceEmbeddings

# Local path to the pre-downloaded BGE model.
model_path = "./models/bge-large-zh-v1.5"
# Hub equivalent (downloads on first use):
# embedding_model = HuggingFaceEmbeddings(model_name="BAAI/bge-large-zh-v1.5", encode_kwargs={"normalize_embeddings": True})

encode_opts = {"normalize_embeddings": True}  # unit-norm vectors for cosine search
load_opts = {"local_files_only": True}        # force loading from disk, never the network
embedding_model = HuggingFaceEmbeddings(
    model_name=model_path,
    encode_kwargs=encode_opts,
    model_kwargs=load_opts,
)
# 5. Build the FAISS vector store from the chunked documents.
from langchain_community.vectorstores import FAISS

vectordb = FAISS.from_documents(corpus, embedding_model)

# (optional) Persist the index locally. The original printed the success
# message unconditionally — misleading when the directory already existed
# and saving was skipped. Report each case explicitly instead.
if not os.path.exists("vectorstore.db"):
    vectordb.save_local("vectorstore.db")
    print("向量數據庫已保存")
else:
    print("向量數據庫已存在,跳過保存")
# 6. Create the retrieval chain: prompt template.
from langchain_core.prompts import ChatPromptTemplate

# NOTE(review): the source file's line breaks were collapsed by extraction
# ("question.<context>" and "</context>Question:" were fused); the blank
# lines inside the template are restored from the evident intent.
template = """
You are a Q&A chat bot.
Use the given context only, answer the question.

<context>
{context}
</context>

Question: {input}
"""

# Build the prompt template from the raw string.
prompt = ChatPromptTemplate.from_template(template)

from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain

# "Stuff" the retrieved documents into the prompt and pass them to the LLM.
doc_chain = create_stuff_documents_chain(llm, prompt)

# Create retriever for later use — fetch the top-k matching chunks.
top_k = 3
retriever = vectordb.as_retriever(search_kwargs={"k": top_k})

# Retrieval + generation pipeline: retrieve context, then answer.
chain = create_retrieval_chain(retriever, doc_chain)
# 7. Run a query through the retrieval chain.
question = "獎懲的原則是什么?"
result = chain.invoke({"input": question})
# Print only the generated answer field of the response dict.
print("\n答案:", result["answer"])
# 模型下載參考上一篇文章: 使用 huggingface-cli 下載模型