Files
SmartVoyage/_demo/function_call/json_schema.py
2026-03-20 22:56:24 +08:00

115 lines
3.5 KiB
Python
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, ToolMessage
from conf import settings
# todo: 第一步:定义工具函数
def add(a: int, b: int) -> int:
    """Return the sum of the two integers.

    Args:
        a: first addend
        b: second addend

    Returns:
        The value ``a + b``.
    """
    total = a + b
    return total
def multiply(a: int, b: int) -> int:
    """Return the product of the two integers.

    Args:
        a: first factor
        b: second factor

    Returns:
        The value ``a * b``.
    """
    product = a * b
    return product
def _arith_tool_spec(name: str, description: str) -> dict:
    """Build an OpenAI function-calling JSON-Schema spec for a two-integer tool.

    Both demo tools share the exact same parameter shape (integers ``a`` and
    ``b``), so the spec is generated from one template instead of being
    duplicated as two literal dicts.
    """
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "integer", "description": "第一个数字"},
                    "b": {"type": "integer", "description": "第二个数字"},
                },
                "required": ["a", "b"],
            },
        },
    }


# Tool declarations handed to the model via bind(tools=...).
tools = [
    _arith_tool_spec("add", "将数字a与数字b相加"),
    _arith_tool_spec("multiply", "将数字a与数字b相乘"),
]
# Step 2: initialise the chat model and attach the tool schemas.
_model_kwargs = {
    "base_url": settings.base_url,
    "api_key": settings.api_key,
    "model": settings.model_name,
    # Low temperature keeps the arithmetic tool-calling deterministic.
    "temperature": 0.1,
}
llm = ChatOpenAI(**_model_kwargs)
# tool_choice="auto": the model decides whether to emit a tool call.
llm_with_tools = llm.bind(tools=tools, tool_choice="auto")
# Step 3: run the two-phase tool-calling conversation.
# Name -> callable dispatch table, built once instead of per tool call.
TOOL_REGISTRY = {"add": add, "multiply": multiply}

query = "2*10+1"
messages = [HumanMessage(query)]
try:
    # First call: the model either answers directly or emits tool_calls.
    ai_msg = llm_with_tools.invoke(messages)
    messages.append(ai_msg)
    print(f"\n第一轮调用后结果:\n{messages}")
    # A non-empty tool_calls attribute means the model requested tool execution.
    if getattr(ai_msg, "tool_calls", None):
        for tool_call in ai_msg.tool_calls:
            selected_tool = TOOL_REGISTRY.get(tool_call["name"].lower())
            if selected_tool is None:
                # Unknown tool name: report and skip instead of raising a
                # KeyError that the broad except below would misreport as
                # a model-call failure.
                print(f"未知工具: {tool_call['name']}")
                continue
            tool_output = selected_tool(**tool_call["args"])
            # ToolMessage content must be a string; the raw int result
            # from add/multiply would fail message validation.
            messages.append(
                ToolMessage(content=str(tool_output), tool_call_id=tool_call["id"])
            )
        print(f"\n第二轮 message中增加tool_output 之后:\n{messages}")
        # Second call: feed the tool results back so the model can
        # compose the final natural-language answer.
        final_response = llm_with_tools.invoke(messages)
        print(f"\n最终模型响应:\n{final_response.content}")
    else:
        print("模型未生成工具调用,直接返回文本:")
        print(ai_msg.content)
except Exception as e:
    # Top-level boundary for this demo script: report and exit cleanly.
    print(f"模型调用失败: {str(e)}")
# llm.invoke(messages, tools=tools, ...):
# 绑定方式: 直接在 .invoke() 调用中传入 tools 参数。这是一种临时、一次性的绑定方式,仅对本次调用有效。
# 调用方式: 如果你想再次调用模型并使用工具,你必须在下一次 .invoke() 调用中再次传递 tools 参数。
# 适用场景: 适用于简单、单次的工具调用需求。