1 Introduction to RAG
Retrieval-Augmented Generation (RAG) emerged to address the difficulty large language models face in updating and extending their knowledge. Instead of changing the model's weights, RAG retrieves external information and supplies it to the model as additional context, much as a person consults a document to recall a fact on the spot, thereby enhancing the model's knowledge without any extra training cost.
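The core pattern is simple: retrieve passages relevant to the question, prepend them to the prompt, and let the model answer from that context. A minimal sketch of the idea (retrieve and llm_complete are illustrative placeholders, not any specific library's API):

# Minimal RAG pattern: retrieve context, then generate an answer from it
def rag_answer(question, retrieve, llm_complete, top_k=3):
    passages = retrieve(question, top_k=top_k)  # vector search over a corpus
    context = "\n\n".join(passages)
    prompt = (
        "Answer the question using only the context below.\n"
        f"Context:\n{context}\n\n"
        f"Question: {question}\nAnswer:"
    )
    return llm_complete(prompt)  # the model answers from the supplied context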
2 Deploying a RAG Application via the API
2.1 Environment Setup
conda create -n llamaindex python=3.10
conda activate llamaindex
pip install einops==0.7.0 protobuf==5.26.1
pip install llama-index==0.11.20
pip install llama-index-llms-replicate==0.3.0
pip install llama-index-llms-openai-like==0.2.0
pip install llama-index-embeddings-huggingface==0.3.1
pip install llama-index-embeddings-instructor==0.2.1
pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu121
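Before moving on, a quick sanity check (a sketch, assuming a CUDA machine) confirms that the pinned packages import cleanly and the GPU is visible:

import torch
from llama_index.core import __version__ as llama_index_version

# Confirm the environment: package versions and CUDA availability
print("llama-index-core:", llama_index_version)
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())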
2.2 Download the Embedding Model
Download it into the /root/model/ directory via the hf-mirror endpoint:
import os
# Point huggingface-cli at the hf-mirror endpoint
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
# Download the model
os.system('huggingface-cli download --resume-download sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2 --local-dir /root/model/sentence-transformer')
Alternatively, download it from ModelScope:
git lfs install
cd /root/model/
git clone https://www.modelscope.cn/Ceceliachenen/paraphrase-multilingual-MiniLM-L12-v2.git
mv paraphrase-multilingual-MiniLM-L12-v2 sentence-transformer
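Either way, the model now sits under /root/model/sentence-transformer. As a quick check that it loads and produces embeddings, a sketch using the same HuggingFaceEmbedding wrapper the later scripts rely on:

from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Load the local model and embed a short string
embed_model = HuggingFaceEmbedding(model_name="/root/model/sentence-transformer")
vec = embed_model.get_text_embedding("黑暗森林")
print(len(vec))  # this MiniLM model produces 384-dimensional vectors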
2.3 Download NLTK Resources
LlamaIndex depends on NLTK; the required data can be downloaded from a Gitee mirror (convenient inside China):
cd /root
git clone https://gitee.com/yzy0612/nltk_data.git --branch gh-pages
cd nltk_data
mv packages/* ./
cd tokenizers
unzip punkt.zip
cd ../taggers
unzip averaged_perceptron_tagger.zip
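To verify that the resources were unpacked where NLTK can find them (a verification sketch; each lookup raises LookupError if the resource is missing):

import nltk

# Search the mirror checkout in addition to the default paths
nltk.data.path.append('/root/nltk_data')
print(nltk.data.find('tokenizers/punkt'))
print(nltk.data.find('taggers/averaged_perceptron_tagger'))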
2.4 Answering with the InternLM API
from openai import OpenAI

base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
api_key = "sk-please fill in a valid token!"
model = "internlm2.5-latest"

# base_url = "https://api.siliconflow.cn/v1"
# api_key = "sk-please fill in a valid token!"
# model = "internlm/internlm2_5-7b-chat"

client = OpenAI(
    api_key=api_key,
    base_url=base_url,
)

q = "水滴撞的第一艘地球战舰是什么"  # "Which Earth warship did the Droplet strike first?"
print(f"question: {q}")
chat_rsp = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": q}],
)
print("answer:")
for choice in chat_rsp.choices:
    print(choice.message.content)
The result:
The model does not answer this question correctly, so next we test the effect of RAG.
2.5 Answering with RAG
First, fetch the original text of The Dark Forest, the second book of the Three-Body trilogy:
mkdir data && cd data
wget -O tmp.txt https://raw.githubusercontent.com/aboutjm/Automation/master/book/%5B%E4%B8%89%E4%BD%931-3%2B%E4%B8%89%E4%BD%93X%E4%BF%AE%E8%AE%A2%E5%A2%9E%E8%A1%A5%5DTXT%E7%B2%BE%E6%A0%A1%E7%89%88.%E5%88%98%E6%85%88%E6%AC%A3/%E4%B8%89%E4%BD%932%E9%BB%91%E6%9A%97%E6%A3%AE%E6%9E%97.txt
iconv -f GBK -t UTF-8 tmp.txt -o threebody2.txt && rm tmp.txt && cd ..
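A quick preview of the converted file confirms the GBK-to-UTF-8 conversion worked (just a verification sketch):

# Print the first few hundred characters; garbled output would mean a bad conversion
with open("data/threebody2.txt", encoding="utf-8") as f:
    print(f.read(300))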
Next, use LlamaIndex with the InternLM (Puyu) API to perform RAG:
import os
os.environ['NLTK_DATA'] = '/root/nltk_data'

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike

# Create an instance of CallbackManager
callback_manager = CallbackManager()

api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
model = "internlm2.5-latest"
api_key = "please fill in your API Key"

# api_base_url = "https://api.siliconflow.cn/v1"
# model = "internlm/internlm2_5-7b-chat"
# api_key = "please fill in your API Key"

llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key,
                 is_chat_model=True, callback_manager=callback_manager)

# Initialize a HuggingFaceEmbedding object that converts text into vector representations
embed_model = HuggingFaceEmbedding(
    # Path to the pretrained sentence-transformer model downloaded above
    model_name="/root/model/sentence-transformer"
)
# Assign the embedding model to the global Settings so that
# subsequent index construction uses it.
Settings.embed_model = embed_model
# Initialize the LLM
Settings.llm = llm

# Read all documents from the given directory and load them into memory
documents = SimpleDirectoryReader("/root/llamaindex_demo/data").load_data()
# Build a VectorStoreIndex from the loaded documents. The index converts
# the documents into vectors and stores them for fast retrieval.
index = VectorStoreIndex.from_documents(documents)
# Create a query engine that accepts queries and returns responses
# grounded in the retrieved documents.
query_engine = index.as_query_engine()

q = "水滴撞的第一艘地球战舰是什么"  # same question as before
print(f"question: {q}")
response = query_engine.query(q)
print("answer:")
print(response)
The RAG answer is:
The first Earth warship struck by the Droplet was the "无限边疆" (Infinite Frontier). The passage notes that after piercing the "无限边疆", the Droplet continued on and punched through a hundred warships in the first row of the combined fleet's rectangular formation, which indicates that the "无限边疆" was the Droplet's first target.
And the original text does confirm that the first warship destroyed by the Droplet was the "无限边疆":
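To see which chunks the answer was actually grounded in, the Response object returned by query() exposes source_nodes; continuing from the query above:

# Inspect the retrieved chunks and their similarity scores
for node_with_score in response.source_nodes:
    print(f"score={node_with_score.score:.3f}")
    print(node_with_score.node.get_content()[:200])  # first 200 characters of the chunk
    print("-" * 40)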
2.6 Web UI
Modify the api_key and the embedding model path in the app.py below:
import streamlit as st
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike

# Create an instance of CallbackManager
callback_manager = CallbackManager()

api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
model = "internlm2.5-latest"
api_key = "please fill in your API Key"

# api_base_url = "https://api.siliconflow.cn/v1"
# model = "internlm/internlm2_5-7b-chat"
# api_key = "please fill in your API Key"

llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key,
                 is_chat_model=True, callback_manager=callback_manager)

st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
st.title("llama_index_demo")

# Initialize the models (cached so this runs only once per session)
@st.cache_resource
def init_models():
    embed_model = HuggingFaceEmbedding(
        model_name="/root/model/sentence-transformer"
    )
    Settings.embed_model = embed_model
    # Initialize the LLM
    Settings.llm = llm
    documents = SimpleDirectoryReader("./data").load_data()
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()
    return query_engine

# Check whether the models need to be initialized
if 'query_engine' not in st.session_state:
    st.session_state['query_engine'] = init_models()

def greet2(question):
    response = st.session_state['query_engine'].query(question)
    return str(response)  # convert the Response object to plain text

# Store LLM generated responses
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]

# Display or clear chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

def clear_chat_history():
    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]

st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

# Function for generating a llama-index response
def generate_llama_index_response(prompt_input):
    return greet2(prompt_input)

# User-provided prompt
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_llama_index_response(prompt)
            placeholder = st.empty()
            placeholder.markdown(response)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)
Start the Streamlit app:
streamlit run app.py
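By default Streamlit listens on port 8501; to choose a specific port (for example, for SSH port forwarding to a remote machine), pass --server.port:
streamlit run app.py --server.port 8501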
3 Deploying a RAG Application Locally
3.1 Download the Model
Next, use the InternLM2-Chat-1.8B model to test the effect of RAG. First, download the model:
import os
# Point huggingface-cli at the hf-mirror endpoint
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
# Download the model
os.system('huggingface-cli download --resume-download internlm/internlm2-chat-1_8b --local-dir /root/model/internlm2-chat-1_8b')
3.2 Installing and Serving with LMDeploy
pip install lmdeploy==0.6.1 openai==1.52.0
lmdeploy serve api_server /root/model/internlm2-chat-1_8b --server-port 23333
After the server starts successfully, you will see output like the following:
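You can also confirm the server is up by listing its models through the OpenAI-compatible endpoint (a verification sketch; LMDeploy reports the served model's path as its id):

from openai import OpenAI

client = OpenAI(api_key="sk-123", base_url="http://127.0.0.1:23333/v1")
# Should print the model path passed to `lmdeploy serve api_server`
print([m.id for m in client.models.list().data])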
3.3 Direct Answering with InternLM2-1.8B
from openai import OpenAI

model_name = "/root/model/internlm2-chat-1_8b"
client = OpenAI(
    api_key='sk-123',  # any non-empty value works; the local server does not check it
    base_url="http://127.0.0.1:23333/v1",
)

q = "水滴撞的第一艘地球战舰是什么"  # same question as before
print(f"question: {q}")
chat_rsp = client.chat.completions.create(
    model=model_name,
    messages=[{"role": "user", "content": q}],
)
print("answer:")
for choice in chat_rsp.choices:
    print(choice.message.content)
Again the model cannot answer the question; Three-Body is not in its training data:
The LMDeploy logs confirm that the answer really did come from the locally deployed InternLM2-1.8B:
3.4 RAG Answering with InternLM2-1.8B
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
from llama_index.llms.openai_like import OpenAILike

callback_manager = CallbackManager()

api_base_url = "http://127.0.0.1:23333/v1"
model = "/root/model/internlm2-chat-1_8b"
api_key = "sk-123"

llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key,
                 is_chat_model=True, callback_manager=callback_manager)
# Point the global settings at the locally served LLM
Settings.llm = llm
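With Settings.llm pointing at the local server, querying works exactly as in Section 2.5. Assuming the embedding model, documents, and index from that section are still in scope (otherwise rebuild them the same way), a sketch:

# Rebuild the query engine so it picks up the locally served LLM
query_engine = index.as_query_engine()
q = "水滴撞的第一艘地球战舰是什么"  # same question as before
response = query_engine.query(q)
print("answer:")
print(response)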