# 基于 langchain 和 qwen 实现一个简单 agent
#
# 收获:
#   - 对 agent 有了初步的了解
#   - 对 langchain 有了初步的了解
#   - python3 初探
#   - plan-execute 模式
import logging
import re
from typing import (
    ClassVar,
)

import dashscope
from dashscope.api_entities.dashscope_response import Message
from langchain.agents import initialize_agent, AgentType, Tool
from langchain.schema import HumanMessage, AIMessage
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from pydantic import ConfigDict


# Qwen API settings.
endpoint = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
# Replace with your own API key (the original `// ...` trailer was a
# SyntaxError — `//` is not a Python comment). Prefer loading the key from an
# environment variable instead of hard-coding a secret in source.
api_key = "sk-xxxxxxxxxxxxxxxxxx"

# Qwen invocation: translate LangChain messages to dashscope and back.
def qwen_model_call(messages):
    """Call the Qwen model with a list of LangChain chat messages.

    Args:
        messages: iterable of HumanMessage / AIMessage; other message types
            are silently skipped.

    Returns:
        AIMessage containing the model reply, or a default apology message
        when the call fails (best-effort: errors are logged, never raised,
        so the chat loop keeps running).
    """
    try:
        formatted_messages = []
        for msg in messages:
            if isinstance(msg, HumanMessage):
                formatted_messages.append(Message("user", msg.content))
            elif isinstance(msg, AIMessage):
                formatted_messages.append(Message("assistant", msg.content))

        response = dashscope.Generation.call(
            api_key=api_key,
            model="qwen-plus",
            messages=formatted_messages,
            result_format='message'
        )
        if response.status_code != 200:
            raise Exception(f"Model returned an error: {response.message}")
        return AIMessage(content=response.output.choices[0].message.content)
    except Exception:
        # Log the failure instead of silently discarding it (the original
        # bound `e` but never used it), then fall back to a canned reply.
        logging.exception("qwen_model_call failed")
        return AIMessage(content="抱歉,我没能生成回应。")

# Process-wide, in-memory conversation history shared by all sessions.
history = InMemoryChatMessageHistory()


def get_session_history():
    """Return the shared chat-history object (required callback shape for
    RunnableWithMessageHistory)."""
    return history

# Wrap the model call in a RunnableLambda so RunnableWithMessageHistory can
# inject past messages and record new ones automatically on each invoke.
conversation = RunnableWithMessageHistory(
    runnable=RunnableLambda(func=qwen_model_call),
    get_session_history=get_session_history
)

# Example tool function.
def search_knowledge_base(query):
    """Stub knowledge-base lookup returning a canned result string.

    A real implementation would query a vector store or external API here.
    """
    return f"这是针对 '{query}' 的检索结果。"

# Example tool function.
def qwen_execute(query):
    """Send *query* to the shared conversation runnable and return the reply text."""
    reply = conversation.invoke([HumanMessage(content=query)])
    # reply is an AIMessage produced by qwen_model_call; expose only the text.
    return reply.content


search_tool = {
    "知识库检索": Tool(
        name="知识库检索",
        func=search_knowledge_base,
        description="从知识库中检索信息。"
    ),
    "qwen检索": Tool(
        name="qwen检索",
        func=qwen_execute,
        description="qwen检索"
    )
}

class Planner(BasePlanner):
    """Planner that asks the conversation model to break a question into numbered steps."""

    # Pydantic config: allow the extra `conversation_model` attribute.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", validate_assignment=True, arbitrary_types_allowed=True)

    # Matches lines beginning "1. ", "2. ", ... Note the escaped dot: the
    # original pattern r"^\d+.\s" let "." match ANY character, wrongly
    # accepting lines like "1x step".
    _STEP_RE: ClassVar[re.Pattern] = re.compile(r"^\d+\.\s")

    def __init__(self, conversation_model: RunnableWithMessageHistory):
        super().__init__()  # initialize the pydantic/BasePlanner machinery
        self.conversation_model = conversation_model

    def plan(self, inputs: dict):
        """Generate a numbered task plan for ``inputs["content"]``.

        Returns:
            dict with:
                parsed_steps: lines that look like numbered steps ("N. ...").
                origin_steps: every line of the raw model reply.

        Raises:
            ValueError: if ``inputs`` is not a dict containing "content".
        """
        if not isinstance(inputs, dict) or "content" not in inputs:
            raise ValueError("计划输入必须是包含 'content' 键的字典类型")

        prompt = (
            "你是一名助手,请将用户的问题分解成多个简单的任务计划。"
            "每个任务步骤用序号标识,例如:\n"
            "1. 搜索相关信息\n2. 汇总结果\n\n用户输入:"
        )
        # Combine the planning instructions with the user's question.
        planning_input = [HumanMessage(content=prompt + inputs["content"])]
        response = self.conversation_model.invoke(planning_input)
        steps = response.content.strip().split("\n")

        # Keep only non-empty lines that look like "N. <task>".
        parsed_steps = [
            step for step in steps
            if step and self._STEP_RE.match(step.strip())
        ]
        return {
            "parsed_steps": parsed_steps,
            "origin_steps": steps,
        }

    async def aplan(self, inputs: dict):
        """Async wrapper; planning itself is synchronous."""
        return self.plan(inputs)


class Executor(BaseExecutor):
    """Executor that dispatches each step to the first tool whose name matches."""

    # Pydantic config: allow the extra `tools` attribute.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", validate_assignment=True, arbitrary_types_allowed=True)

    def __init__(self, tool_list):
        super().__init__()  # initialize the pydantic/BaseExecutor machinery
        self.tools = tool_list

    def step(self, step: dict):
        """Run the first tool whose name appears as a key in *step*.

        Falls back to an error string when no key names a known tool.
        """
        for name, arg in step.items():
            tool = self.tools.get(name)
            if tool is not None:
                return tool.func(arg)
        return f"无法执行步骤:{step}"

    async def astep(self, step: dict):
        """Async variant; simply delegates to the synchronous step()."""
        return self.step(step)

# Wire the Plan-and-Execute agent from the custom planner and executor above.
planner = Planner(conversation)
executor = Executor(search_tool)
plan_and_execute_agent = PlanAndExecute(planner=planner, executor=executor)

# Main loop.
def chat_with_agent():
    """Interactive REPL: plan the user's question, then execute each step.

    Type 'exit' to quit. Steps that parse as a numbered plan are executed
    through the "qwen检索" tool; otherwise the raw model reply is printed.
    """
    print("温柔一刀: 我来了!有什么想和我聊的吗?(输入 'exit' 结束聊天)")

    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            print("温柔一刀: bye")
            break

        # The planner expects a dict with a "content" key.
        inputs = {"content": HumanMessage(content=user_input).content}
        try:
            plan_result = plan_and_execute_agent.planner.plan(inputs)
            parsed = plan_result["parsed_steps"]
            if not parsed:
                # No numbered steps detected — show the raw reply instead.
                for line in plan_result["origin_steps"]:
                    print("温柔一刀:", line)
                continue
            print("温柔一刀: 我来帮你分解问题,计划如下:")
            for task in parsed:
                print(f" - {task}")
            print("温柔一刀: 具体步骤如下:")
            for task in parsed:
                outcome = plan_and_execute_agent.executor.step({"qwen检索": task})
                print(f"'{task}': {outcome}")
        except Exception as e:
            print("温柔一刀: 抱歉,执行任务时遇到问题:", str(e))

# Start the chat agent only when run as a script, so importing this module
# for reuse/testing does not block on input().
if __name__ == "__main__":
    chat_with_agent()