"""Case 2: a search-enabled intelligent agent (LangGraph tutorial).

Builds a ReAct-style loop:
  START -> llm_calls -> (tool_node -> llm_calls)* -> END

* ``MessageState`` holds the conversation.  ``messages`` is annotated with
  ``operator.add`` so every node's returned messages are APPENDED to the
  existing list instead of replacing it; ``llm_calls`` counts model invocations.
* ``llm_calls`` (node) asks the tool-bound model for the next step.  Because it
  can be reached from START or from the tool node, ``state["messages"]`` may be
  ``[Human]`` or ``[Human, AI, Tool, ...]``.  The model returns an AIMessage
  with or without ``tool_calls``.
* ``tool_node`` executes every tool call on the latest AIMessage and wraps each
  observation in a ``ToolMessage`` (matched to the call via ``tool_call_id``).
* ``should_continue`` routes to the tool node while tool calls are pending,
  otherwise to END.

NOTE(review): an earlier draft in this tutorial used model "glm-4" and a state
key "message"; the final code below uses "glm-5" and "messages" — confirm the
intended model name against the Zhipu account.
"""
from typing import TypedDict, Annotated
import operator
import os

from langchain_core.messages import AnyMessage, SystemMessage, ToolMessage, HumanMessage
from langchain_tavily import TavilySearch
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langchain_openai import ChatOpenAI

# Compatibility patch — must run before any chain executes: silence global
# verbose/debug output and disable the global LLM cache.
import langchain
langchain.verbose = False
langchain.debug = False
langchain.llm_cache = None

# --- Step 1: chat model and search tool ---------------------------------
api_key = os.getenv("ZHIPUAI_API_KEY")
model = ChatOpenAI(
    model="glm-5",
    api_key=api_key,
    base_url="https://open.bigmodel.cn/api/paas/v4/",  # Zhipu official endpoint
    temperature=0,
)

search = TavilySearch(max_results=4)
tools = [search]
model_with_tools = model.bind_tools(tools)


# --- Step 2: state definition -------------------------------------------
class MessageState(TypedDict):
    # operator.add merge strategy: node updates are appended, not replaced.
    messages: Annotated[list[AnyMessage], operator.add]
    # Running count of LLM invocations.
    llm_calls: int


# --- Step 3: LLM node ----------------------------------------------------
def llm_calls(state: MessageState):
    """Let the LLM decide whether to call a tool.

    Returns an AIMessage that may or may not carry ``tool_calls``, plus an
    incremented call counter.
    """
    messages = state["messages"]
    result = model_with_tools.invoke(
        [SystemMessage(content="你是一个乐于助人的助手，支持调用工具搜索")] + messages
    )
    return {
        "messages": [result],
        "llm_calls": state["llm_calls"] + 1,
    }


# --- Step 4: tool node ---------------------------------------------------
tools_by_name = {tool.name: tool for tool in tools}


def tool_node(state: MessageState):
    """Execute every pending tool call and return the observations.

    The latest message is the AIMessage produced by ``llm_calls``; each of its
    ``tool_calls`` dicts supplies ``name``, ``args`` and ``id``.
    """
    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_by_name[tool_call["name"]]
        obs = tool.invoke(tool_call["args"])
        result.append(ToolMessage(content=obs, tool_call_id=tool_call["id"]))
    return {"messages": result}


# --- Step 5: build the graph ---------------------------------------------
agent_builder = StateGraph(MessageState)
agent_builder.add_node("llm_calls", llm_calls)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_edge(START, "llm_calls")


def should_continue(state: MessageState):
    """Route to the tool node while the latest AIMessage has tool calls."""
    last_message = state["messages"][-1]
    return "tool_node" if last_message.tool_calls else END


agent_builder.add_conditional_edges("llm_calls", should_continue, ["tool_node", END])
agent_builder.add_edge("tool_node", "llm_calls")

agent_search = agent_builder.compile()

# --- Step 6: visualize the graph -----------------------------------------
# Mermaid playground for the generated diagram: https://www.jyshare.com/front-end/9729/
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

try:
    # Render the graph to a PNG via Mermaid and save it.
    mermaid_code = agent_search.get_graph(xray=True).draw_mermaid_png()
    with open("../jpg/graph1.jpg", "wb") as f:
        f.write(mermaid_code)
    # Display the saved image in a window.
    img = mpimg.imread("../jpg/graph1.jpg")
    plt.imshow(img)
    plt.axis("off")
    plt.show()
except Exception as e:
    # Rendering needs network/graphviz services; failure is non-fatal.
    print(f"An error occurred: {e}")

# --- Step 7: run (non-streaming invoke) ----------------------------------
result = agent_search.invoke({
    "messages": [HumanMessage(content="今天北京的天气如何")],
    "llm_calls": 0,
})
print(f"一共调用了 {result['llm_calls']} 次LLM")
for msg in result["messages"]:
    msg.pretty_print()