Reference: an approach to making Ollama compatible with OpenAIEmbeddings

The stock OpenAIEmbeddings class tokenizes input with tiktoken and sends token IDs to the embeddings endpoint; Ollama's OpenAI-compatible /v1/embeddings endpoint expects raw strings, so the subclass below skips tokenization and passes the text through unchanged.

Solution code:

Code for accessing the embedding model
# In testing, either of the following two imports works
from langchain_openai import OpenAIEmbeddings
#from langchain_community.embeddings import OpenAIEmbeddings
from typing import List, Optional


class OllamaCompatibleEmbeddings(OpenAIEmbeddings):
    def _tokenize(self, texts: List[str], chunk_size: int) -> tuple:
        """Disable tokenization; return the raw texts and their indices."""
        indices = list(range(len(texts)))
        return (range(0, len(texts), chunk_size), texts, indices)

    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        """Pass the raw texts straight through, skipping the tokenization step."""
        _chunk_size = chunk_size or self.chunk_size
        batched_embeddings: List[List[float]] = []
        # Iterate over the raw texts in chunks
        for i in range(0, len(texts), _chunk_size):
            chunk = texts[i: i + _chunk_size]
            # Key change: `input` is the text list itself
            response = self.client.create(
                input=chunk,  # raw text list, not token IDs
                model=self.model,  # pass the model name explicitly
                **{k: v for k, v in self._invocation_params.items() if k != "model"},
            )
            if not isinstance(response, dict):
                response = response.model_dump()
            batched_embeddings.extend(r["embedding"] for r in response["data"])
        # Skip the empty-text handling (Ollama does not need it)
        return batched_embeddings

    async def _aget_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        """Async version of the same logic."""
        _chunk_size = chunk_size or self.chunk_size
        batched_embeddings: List[List[float]] = []
        for i in range(0, len(texts), _chunk_size):
            chunk = texts[i: i + _chunk_size]
            response = await self.async_client.create(
                input=chunk,
                model=self.model,
                **{k: v for k, v in self._invocation_params.items() if k != "model"},
            )
            if not isinstance(response, dict):
                response = response.model_dump()
            batched_embeddings.extend(r["embedding"] for r in response["data"])
        return batched_embeddings


embeddings = OllamaCompatibleEmbeddings(
    model="bge-m3:latest",
    openai_api_base="http://localhost:11434/v1",  # note: the openai client appends the path itself, producing http://127.0.0.1:11434/v1/embeddings
    api_key="ollama",  # any string works, it is only needed for format compatibility
    chunk_size=512,
)
output = embeddings.embed_query("你好")
print(output)
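For batch input, the same subclass can be used through embed_documents, which OpenAIEmbeddings already provides. A minimal sketch; the sample sentences are placeholders, any list of strings works:

# Embed several documents in one call and check the result shape
docs = [
    "Ollama serves an OpenAI-compatible API on port 11434.",
    "bge-m3 is a multilingual embedding model.",
]
vectors = embeddings.embed_documents(docs)
print(len(vectors), len(vectors[0]))  # number of vectors and embedding dimension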
Code for accessing the LLM
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage

# Point the ChatOpenAI class at the local Ollama API
llm = ChatOpenAI(
    model_name="deepseek-r1:1.5b",  # name of the model deployed in Ollama
    openai_api_base="http://localhost:11434/v1",  # Ollama API address
    api_key="ollama",  # any string, only needed for format compatibility
    temperature=0.7,  # output randomness: 0 is deterministic, 1 is most random
    max_tokens=2000,  # maximum generation length
)

# Define the question-asking function
def ask_llm(question: str) -> str:
    """Ask the local DeepSeek-R1 model a question and return the answer."""
    # Build the HumanMessage object
    messages = [HumanMessage(content=question)]
    # Call the model to generate an answer
    response = llm.invoke(messages)
    # Extract the answer content
    return response.content

# Test example
if __name__ == "__main__":
    question = "Briefly introduce the basic principles of quantum computing"
    answer = ask_llm(question)
    print(f"Question: {question}")
    print("\nAnswer:")
    print(answer)
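ChatOpenAI also exposes streaming through the standard .stream() interface, which works against the same Ollama endpoint. A minimal sketch reusing the llm instance defined above; the question string is just an example:

# Stream the answer token by token; .stream() yields message chunks
for chunk in llm.stream([HumanMessage(content="Briefly introduce the basic principles of quantum computing")]):
    print(chunk.content, end="", flush=True)
print()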