- Gained an initial understanding of agents
- Gained an initial understanding of LangChain
- A first taste of Python 3
- The plan-and-execute pattern (see the minimal sketch below, then the full demo)
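At its core the plan-and-execute pattern is just two roles: a planner that breaks the request into ordered steps, and an executor that runs those steps one by one. Here is a minimal, framework-free sketch of that loop; the function and parameter names are illustrative only and are not part of the demo below:

```python
# Minimal plan-and-execute loop (illustrative only; names are not from the demo below)
def run_plan_and_execute(planner, executor, query):
    steps = planner(query)             # 1. decompose the request into ordered steps
    results = []
    for step in steps:                 # 2. run each step in turn
        results.append(executor(step))
    return results                     # 3. hand the per-step results back to the caller
```

The full demo that follows builds the same loop on LangChain's `PlanAndExecute` scaffolding, with Qwen (called through DashScope) acting as both the planner model and one of the executor's tools.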
import dashscope
from dashscope.api_entities.dashscope_response import Message
from langchain_core.runnables import RunnableLambda
from langchain.schema import HumanMessage, AIMessage
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain.agents import initialize_agent, AgentType, Tool
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from pydantic import ConfigDict
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from typing import ClassVar
import re
from langchain_core.runnables.history import RunnableWithMessageHistory
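# DashScope access settings; the calls below only use api_key (endpoint is kept for reference)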
endpoint = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
api_key = "sk-xxxxxxxxxxxxxxxxxx" // 换成自己的api key
def qwen_model_call(messages):
    try:
        # Map LangChain message types onto DashScope roles
        formatted_messages = []
        for msg in messages:
            if isinstance(msg, HumanMessage):
                formatted_messages.append(Message("user", msg.content))
            elif isinstance(msg, AIMessage):
                formatted_messages.append(Message("assistant", msg.content))
        response = dashscope.Generation.call(
            api_key=api_key,
            model="qwen-plus",
            messages=formatted_messages,
            result_format='message'
        )
        if response.status_code != 200:
            raise Exception(f"Model returned an error: {response.message}")
        return AIMessage(content=response.output.choices[0].message.content)
    except Exception as e:
        # Keep the conversation alive on failure, but surface why the call failed
        print(f"qwen_model_call failed: {e}")
        return AIMessage(content="抱歉,我没能生成回应。")
history = InMemoryChatMessageHistory()
def get_session_history():
    return history
conversation = RunnableWithMessageHistory(
    runnable=RunnableLambda(func=qwen_model_call),
    get_session_history=get_session_history,
)
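# Executor tools: a stub knowledge-base lookup and a passthrough that queries the Qwen conversation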
def search_knowledge_base(query):
    return f"这是针对 '{query}' 的检索结果。"
def qwen_execute(query):
    human_message = HumanMessage(content=query)
    response = conversation.invoke([human_message])
    return response.content
search_tool = {
    "知识库检索": Tool(
        name="知识库检索",
        func=search_knowledge_base,
        description="从知识库中检索信息。"
    ),
    "qwen检索": Tool(
        name="qwen检索",
        func=qwen_execute,
        description="qwen检索"
    )
}
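# Planner: prompts the model to decompose the user's question into numbered steps, then parses them out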
class Planner(BasePlanner):
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", validate_assignment=True, arbitrary_types_allowed=True)

    def __init__(self, conversation_model: RunnableWithMessageHistory):
        super().__init__()
        self.conversation_model = conversation_model

    def plan(self, inputs: dict):
        if not isinstance(inputs, dict) or "content" not in inputs:
            raise ValueError("计划输入必须是包含 'content' 键的字典类型")
        prompt = (
            "你是一名助手,请将用户的问题分解成多个简单的任务计划。"
            "每个任务步骤用序号标识,例如:\n"
            "1. 搜索相关信息\n2. 汇总结果\n\n用户输入:"
        )
        planning_input = [HumanMessage(content=prompt + inputs["content"])]
        response = self.conversation_model.invoke(planning_input)
        steps = response.content.strip().split("\n")
        # Keep only the lines that look like numbered steps, e.g. "1. ..."
        parsed_steps = []
        for step in steps:
            if not step.strip():
                continue
            if re.match(r"^\d+\.\s", step.strip()):
                parsed_steps.append(step)
        return {
            "parsed_steps": parsed_steps,
            "origin_steps": steps,
        }

    async def aplan(self, inputs: dict):
        return self.plan(inputs)
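# Executor: dispatches each step dict ({tool_name: parameter}) to the matching tool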
class Executor(BaseExecutor):
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", validate_assignment=True, arbitrary_types_allowed=True)

    def __init__(self, tool_list):
        super().__init__()
        self.tools = tool_list

    def step(self, step: dict):
        """Execute a single step synchronously by dispatching to the named tool."""
        for tool_name, param in step.items():
            if tool_name in self.tools:
                return self.tools[tool_name].func(param)
        return f"无法执行步骤:{step}"

    async def astep(self, step: dict):
        """Async variant; simply delegates to the synchronous step()."""
        return self.step(step)
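# Assemble the PlanAndExecute agent; the chat loop below calls its planner and executor directly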
planner = Planner(conversation)
executor = Executor(search_tool)
plan_and_execute_agent = PlanAndExecute(planner=planner, executor=executor)
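# Console chat loop: plan the user's input, print the steps, then run each one through the "qwen检索" tool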
def chat_with_agent():
    print("温柔一刀: 我来了!有什么想和我聊的吗?(输入 'exit' 结束聊天)")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            print("温柔一刀: bye")
            break
        human_message = HumanMessage(content=user_input)
        inputs = {"content": human_message.content}
        try:
            # 1. Ask the planner to break the question into numbered steps
            plan_result = plan_and_execute_agent.planner.plan(inputs)
            if len(plan_result["parsed_steps"]) == 0:
                # No numbered steps found: just echo the model's raw reply
                for step in plan_result["origin_steps"]:
                    print("温柔一刀:", step)
                continue
            print("温柔一刀: 我来帮你分解问题,计划如下:")
            for step in plan_result["parsed_steps"]:
                print(f" - {step}")
            print("温柔一刀: 具体步骤如下:")
            # 2. Run every parsed step through the Qwen tool
            for step in plan_result["parsed_steps"]:
                result = plan_and_execute_agent.executor.step({"qwen检索": step})
                print(f"'{step}': {result}")
        except Exception as e:
            print("温柔一刀: 抱歉,执行任务时遇到问题:", str(e))
chat_with_agent()