人不能轻易否定过去的自己,不能否定过去的选择与价值,那从来不是一场错误。 平淡的日子里,暗流涌动,有人选择潦草涂写,有人选择认真周旋。
从本文开始,会从0到1手写一个AI助手,今天是第一篇,主要完成以下功能:
- 项目搭建,基于langchain框架
- 实现支持单轮问答的助手
- 可交互的页面
项目结构
只列举了几个主要的包,其它的都是支撑包
主要类说明
main.py
启动入口,通过gradio提供页面交互,可以通过修改system_message来定制助手类型
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import gradio as gr
from assistant import Assistant
def launch_gradio():
    """Start the Gradio chat UI backed by a single Assistant instance."""
    # The system_message customizes the assistant's persona (here: math helper).
    math_assistant = Assistant(system_message="你是一个擅长数学的助手,能回答所有数学问题")
    chat_ui = gr.ChatInterface(
        fn=math_assistant.ask,
        title="小助手",
        chatbot=gr.Chatbot(height=500),
    )
    # share=True exposes a temporary public link; 0.0.0.0 listens on all interfaces.
    chat_ui.launch(share=True, server_name="0.0.0.0")


if __name__ == "__main__":
    launch_gradio()
assistant.py
助手主类,该类会根据问题来控制解答的流程,现在只支持通过openai实现单次回答
import json
from model import OpenAIModel
from config import YamlConfig
from chains import OneCallChain
"""
助手类,负责接收输入,调用链,返回输出。
后续可以保存聊天记录、链路由等
"""
class Assistant:
    """Assistant entry point: receives user input, runs a chain, returns the answer.

    Currently only supports a single-turn answer via OpenAI; future work may
    add chat-history persistence and routing between multiple chains.
    """

    def __init__(self, system_message: str = "你是一个通用的生活助理,能回答所有生活中的问题") -> None:
        config = YamlConfig()
        # Idiomatic attribute access; the previous code called
        # config.__getattr__("model") directly, which bypasses normal lookup.
        api_key = config.model["openai_api_key"]
        model = OpenAIModel(model_name="gpt-3.5-turbo", api_key=api_key)
        self.one_call_chain = OneCallChain(
            llm=model.create_chat_model(), system_message=system_message
        )

    def ask(self, question: str, history):
        """Answer one question.

        `history` is supplied by gr.ChatInterface but is not used yet
        (no memory support in this version).
        """
        print(history)  # debug: show the conversation history gradio passes in
        return self.one_call_chain.run(question)
one_call_chain.py
一个支持单次回答的链
from langchain_core.prompts import ChatPromptTemplate,SystemMessagePromptTemplate,HumanMessagePromptTemplate
from model import BaseModel
from langchain_core.output_parsers import StrOutputParser
from util import LOG
"""
基础聊天链
"""
class OneCallChain:
    """Minimal single-turn chat chain: system prompt + user question -> string answer."""

    def __init__(self, llm, system_message: str = "你是一个通用的生活助理,能回答所有生活中的问题"):
        # Build the prompt once at construction time; the composed chain is reusable.
        prompt = ChatPromptTemplate.from_messages(
            [
                SystemMessagePromptTemplate.from_template(system_message),
                HumanMessagePromptTemplate.from_template("请回答问题: {question}"),
            ]
        )
        # LCEL pipeline: prompt -> model -> plain-string parsing.
        self.chain = prompt | llm | StrOutputParser()

    def run(self, text: str):
        """Invoke the chain with one question and return the parsed answer."""
        return self.chain.invoke({"question": text})
openai.py
openai接口类,可通过修改model_name来修改使用的模型
from langchain_openai import ChatOpenAI
from model import BaseModel
class OpenAIModel(BaseModel):
    """Thin wrapper around langchain's ChatOpenAI.

    Change `model_name` at construction time to switch the underlying model.
    """

    def __init__(self, model_name: str = "gpt-3.5-turbo", api_key: str = None, verbose: bool = True) -> None:
        self.model_name = model_name
        self.api_key = api_key
        self.verbose = verbose

    def create_chat_model(self, model_name: str = None, verbose: bool = None):
        """Build a ChatOpenAI instance, preferring explicit arguments over
        the values stored in __init__.

        Bug fix: the defaults used to be "gpt-3.5-turbo" / True, so the
        `model_name is not None` fallback never triggered and the instance
        configuration was silently ignored. Defaults are now None so
        omitted arguments fall back to the values set in __init__.
        """
        return ChatOpenAI(
            model_name=model_name if model_name is not None else self.model_name,
            api_key=self.api_key,
            verbose=verbose if verbose is not None else self.verbose,
        )
yaml_config.py
系统配置加载类,支持环境区分
import yaml
import os
import sys
class YamlConfig:
    """Singleton configuration loader with environment overlay.

    Loads <app_dir>/config.yaml, reads `application.env` from it, then loads
    config-<env>.yaml and merges it on top. Environment values win on
    conflicts; note the merge is shallow, so nested dicts are replaced whole.
    Top-level config sections are exposed as attributes, e.g.
    ``YamlConfig().model``.
    """

    # Project root: parent of the directory containing this file.
    _application_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    _config_path = f"{_application_dir}/config.yaml"
    _instance = None

    def __new__(cls):
        # Lazy singleton: parse configuration files only once per process.
        if cls._instance is None:
            cls._instance = super(YamlConfig, cls).__new__(cls)
            cls._load_all_config(cls._instance, config_path=cls._config_path)
        return cls._instance

    def _load_all_config(self, config_path):
        """Load the base config, overlay the env-specific config, store the result."""
        try:
            # Load main config.
            config = self.yaml_load(config_path=config_path)
            # Use .get so a missing "application"/"env" key reaches the explicit
            # check below instead of raising an opaque KeyError first.
            env = (config.get("application") or {}).get("env")
            if env is None:
                raise Exception("未配置环境")
            # Load environment-specific config.
            env_config_path = f"{self._application_dir}/config-{env}.yaml"
            env_config = self.yaml_load(config_path=env_config_path)
            print(env_config)
            # Shallow merge; env config wins on conflicting top-level keys.
            config.update(env_config)
            self._config = config
        except Exception as e:
            raise Exception(f"加载应用配置异常。config_path:{config_path},exception:{e}") from e

    def yaml_load(self, config_path):
        """Parse one YAML file; safe_load blocks arbitrary object construction."""
        try:
            # Explicit encoding avoids platform-dependent default codecs.
            with open(config_path, "r", encoding="utf-8") as f:
                return yaml.safe_load(f)
        except Exception as e:
            raise Exception(f"加载配置文件异常。config_path:{config_path},exception:{e}") from e

    def __getattr__(self, name):
        # Called only for attributes not found through normal lookup; resolve
        # from the loaded config. Reading _config via __dict__ avoids infinite
        # recursion if _config itself is missing.
        config = self.__dict__.get("_config")
        if config and name in config:
            return config[name]
        raise AttributeError(f"配置文件中不存在该属性 {name}")
以上就是AI助手的初级版本,后续会计划加入记忆功能、检索外部数据、加载私有化数据等功能
github:github.com/sanjinbette…
开发环境搭建可参考:juejin.cn/post/734313…