RunnableLambda: Run Custom Functions | 🦜🔗 Langchain
可以在pipeline中使用任意函數,但要注意所有的輸入都只能是“1”個參數,當函數需要多個參數時需要采用字典來包裝
itemgetter用法見langchain學習筆記(六)_runnablepassthrough-CSDN博客
from operator import itemgetter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_community.chat_models import ChatOllama
def single_arg(arg):
    """Identity function demonstrating a single-argument RunnableLambda.

    A function wrapped in RunnableLambda must accept exactly one argument;
    this one simply passes its input through unchanged.
    """
    # Bug fix: in the original the `return arg` had been fused into the
    # trailing comment, leaving an empty (invalid) function body.
    return arg
def _multiple_arg(arg1, arg2):return arg1*arg2
def multiple_arg(_dict):#多個參數需要通過字典包裝,邏輯在調用的函數內實現return _multiple_arg(_dict["arg1"], _dict["arg2"])
prompt = ChatPromptTemplate.from_template("what is {a} + {b}")
model = ChatOllama(model="qwen:0.5b-chat", temperature=0)
chain1 = prompt | model

# Build the prompt variables from the raw input dict:
#   "a" <- arg1, passed through the one-argument lambda unchanged
#   "b" <- arg1 * arg2, computed via the dict-wrapped multi-argument lambda
chain = (
    {
        "a": itemgetter("arg1") | RunnableLambda(single_arg),
        "b": {"arg1": itemgetter("arg1"), "arg2": itemgetter("arg2")}
        | RunnableLambda(multiple_arg),
    }
    | prompt
    | model
)
print(chain.invoke({'arg1': 1, 'arg2': 2}))
?
RunnableConfig用于將回調、標記和其他配置信息傳遞嵌套運行。
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableConfig,RunnableLambda
import json
# Missing in the original: ChatOpenAI is used below but was never imported.
from langchain_community.chat_models import ChatOpenAI


def parse_or_fix(text: str, config: RunnableConfig):
    """Parse *text* as JSON, asking an LLM to repair it on failure (3 tries).

    The RunnableConfig is forwarded to the nested fixing chain's invoke so
    that callbacks and tags propagate into the inner run.

    Returns the parsed object, or the string "Failed to parse" after three
    failed repair attempts.
    """
    fixing_chain = (
        ChatPromptTemplate.from_template(
            "Fix the following text:\n\n```text\n{input}\n```\nError: {error}"
            " Don't narrate, just respond with the fixed data."
        )
        | ChatOpenAI()
        | StrOutputParser()
    )
    for _ in range(3):
        try:
            return json.loads(text)
        except Exception as e:
            # Feed the parse error back to the model and retry with its output.
            text = fixing_chain.invoke({"input": text, "error": e}, config)
    return "Failed to parse"


from langchain.callbacks import get_openai_callback
# Track token usage of the nested run: the callback handler and a tag are
# passed via the config dict, which parse_or_fix forwards to its inner chain.
# (Bug fix: the original collapsed the whole `with` block onto one line,
# which is invalid Python syntax.)
with get_openai_callback() as cb:
    output = RunnableLambda(parse_or_fix).invoke(
        "{foo: bar}", {"tags": ["my-tag"], "callbacks": [cb]}
    )
    print(output)
    print(cb)