hello world
from openai import OpenAI
import os

# The API key is read from the environment; never hard-code credentials.
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')

client = OpenAI(api_key=OPENAI_API_KEY)

# One-shot chat completion: a fixed system prompt plus a single user question.
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "什么是js事件循环"},
    ],
)
print(completion.choices[0].message)
tiktoken:计算文本会被模型切分成多少个 token
import openai
import tiktoken

# Pick the tokenizer that matches the target model.
encoding = tiktoken.encoding_for_model("gpt-4")

chinese = """在未来还没有到来的时候,总要有人把它创造出来,那个人应该是我们啊。"""
tokens = encoding.encode(chinese)
print(tokens)

num_of_tokens_in_chinese = len(encoding.encode(chinese))
print(f"chinese:{chinese} ; {num_of_tokens_in_chinese} tokens\n")  # 37 tokens (was `//`, which is not a Python comment)
LangChain 连接大模型和应用的框架
1. 提示词模板 PromptTemplate
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import AzureChatOpenAI

# Ask the model for exactly one company-name suggestion per product.
prompt_template = "What is a good name for a company that makes {product}? And only return the best one."

# NOTE(review): `deployment` and `model` are assumed to be defined earlier in the notes.
llm = AzureChatOpenAI(deployment_name=deployment, model_name=model, temperature=0, max_tokens=200)

llm_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template(prompt_template))

# Single invocation.
llm_chain.run("colorful socks")

# Batch invocation: one dict of template variables per item.
products = [
    {"product": "'cloudnative devops platform'"},
    {"product": "'Noise cancellation headphone'"},
    {"product": "colorful socks"},
]
llm_chain.apply(products)
2. API调用链 LLMRequestsChain
- 通过API获取信息,然后通过LLM给出最终答案
- 例如:http request chain 使用请求库从url获取html结果,然后使用llm分析结果
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chains import LLMRequestsChain
from langchain.chat_models import AzureChatOpenAI

# NOTE(review): `deployment` and `model` are assumed to be defined earlier in the notes.
llm = AzureChatOpenAI(deployment_name=deployment, model_name=model, temperature=0, max_tokens=200)


def query_baidu(question):
    """Fetch Baidu search results for *question* and let the LLM extract the answer.

    LLMRequestsChain downloads the page at `inputs["url"]`, injects the page text
    as `{requests_result}` into the prompt, and the LLM returns either the
    extracted answer or "not found".
    """
    # Local import keeps this snippet self-contained within the notes.
    from urllib.parse import quote_plus

    template = """Between >>> and <<< are the raw search result text from web.
Extract the answer to the question '{query}' or say "not found" if the information is not contained.
Use the format
Extracted:<answer or "not found">
>>> {requests_result} <<<
Extracted:"""
    PROMPT = PromptTemplate(
        input_variables=["query", "requests_result"],
        template=template,
    )
    inputs = {
        "query": question,
        # quote_plus percent-encodes non-ASCII (e.g. Chinese) and spaces;
        # the previous bare .replace(" ", "+") left Chinese characters raw in the URL.
        "url": "http://www.baidu.com/s?wd=" + quote_plus(question),
    }
    requests_chain = LLMRequestsChain(
        llm_chain=LLMChain(llm=llm, prompt=PROMPT),
        output_key="query_info",
        verbose=True,
    )
    res = requests_chain.run(inputs)
    return res


query_baidu("今天北京天气?")
- 查询百度天气,inputs 接收 url,然后请求完后的结果返回给组装好的llm大模型,根据模板分析出要的结果
- 也有封装好的谷歌搜索的例子
from langchain.utilities import GoogleSerperAPIWrapper


def query_web(question):
    """Run *question* through the Google Serper search wrapper and return the raw result."""
    search = GoogleSerperAPIWrapper()
    return search.run(question)

# --- (was a bare `---` line, which is a syntax error inside a Python snippet)

query_web("今天北京天气?")
3. 调用链SequentialChain
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import AzureChatOpenAI  # deduplicated: was imported twice
from langchain.chains import SequentialChain

# NOTE(review): `deployment` and `model` are assumed to be defined earlier in the notes.
llm = AzureChatOpenAI(deployment_name=deployment, model_name=model, temperature=0, max_tokens=200)

# Step 1: compress the input text into a one-sentence summary.
summarizing_prompt_template = """
Summarize the following content into a sentence less than 20 words:
---
{content}
"""
summarizing_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(summarizing_prompt_template),
    output_key="summary",
)

# Step 2: translate that summary into Chinese.
translating_prompt_template = """
translate "{summary}" into Chinese:
"""
translating_chain = LLMChain(
    llm=llm,
    prompt=PromptTemplate.from_template(translating_prompt_template),
    output_key="translated",
)

# Run both steps in order; expose the intermediate summary and the final translation.
overall_chain = SequentialChain(
    chains=[summarizing_chain, translating_chain],
    input_variables=["content"],
    output_variables=["summary", "translated"],
    verbose=True,
)

res = overall_chain("""
LangChain is a framework for developing applications powered by language models. It enables applications that are:
Data-aware: connect a language model to other sources of data
Agentic: allow a language model to interact with its environment
The main value props of LangChain are:
Components: abstractions for working with language models, along with a collection of implementations for each abstraction. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
Off-the-shelf chains: a structured assembly of components for accomplishing specific higher-level tasks
Off-the-shelf chains make it easy to get started. For more complex applications and nuanced use-cases, components make it easy to customize existing chains or build new ones.
""")
print("summary:" + res["summary"])
print("中文:" + res["translated"])
让 ChatGPT有记忆功能 - ConversationBufferWindowMemory
1. 自己记住对话历史
import openai
def get_response(msg):
    """Send the chat messages to the Azure OpenAI deployment and return the reply text."""
    # NOTE(review): pre-1.0 openai SDK interface; `deployment` is defined elsewhere in the notes.
    completion = openai.ChatCompletion.create(
        engine=deployment,
        messages=msg,
        temperature=0.9,
        max_tokens=600,
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
def history_to_prompt(chat_history):
    """Convert [(user, assistant), ...] round trips into an OpenAI chat message list.

    The list always starts with the fixed system prompt; each round trip then
    contributes one "user" message followed by one "assistant" message, in order.
    (Removed a dead `i = 0` counter that was never used.)
    """
    msg = [{"role": "system", "content": "You are an AI assistant."}]
    for user_text, assistant_text in chat_history:
        msg.append({"role": "user", "content": user_text})
        msg.append({"role": "assistant", "content": assistant_text})
    return msg
def respond(message, chat_history):
    """Gradio callback: answer *message* with full history context, then clear the textbox."""
    prompt_messages = history_to_prompt(chat_history)
    prompt_messages.append({"role": "user", "content": message})
    reply = get_response(prompt_messages)
    chat_history.append((message, reply))
    # The empty string clears the input box; the updated history refreshes the chatbot.
    return "", chat_history
import gradio as gr

# Minimal chat UI: textbox + submit button + clear button over a Chatbot widget.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=480)
    msg = gr.Textbox(label="Prompt")
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    # Two triggers for the same callback: pressing Enter and clicking Submit.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

gr.close_all()
demo.launch(share=True)
2. ConversationBufferWindowMemory
from langchain.chat_models import AzureChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationChain

# NOTE(review): `deployment` and `model` are assumed to be defined earlier in the notes.
llm = AzureChatOpenAI(deployment_name=deployment, model_name=model, temperature=0, max_tokens=200)

# Keep only the last 10 conversation rounds in the prompt window.
memory = ConversationBufferWindowMemory(k=10)

# Build the chain once. The original rebuilt it on every call, which was wasted
# work: all conversational state lives in `memory`, not in the chain object.
conversation_with_memory = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=False,
)


def get_response(input):
    """Answer *input*, carrying context from previous turns via the window memory."""
    # Debug aid: show what the memory holds before each turn.
    print("------------")
    print(memory.load_memory_variables({}))
    print("------------")
    return conversation_with_memory.predict(input=input)
import gradio as gr
def respond(message, chat_history):
    """Gradio callback: get a memory-aware reply and append the round trip to history."""
    reply = get_response(message)
    chat_history.append((message, reply))
    return "", chat_history
# Same minimal chat UI as above, but the memory now lives inside LangChain.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=300)
    msg = gr.Textbox(label="Prompt")
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    # Two triggers for the same callback: pressing Enter and clicking Submit.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

gr.close_all()
demo.launch(share=True)
启发新的交互方式
- 多用gpt的联想,推理能力,多和外界的内容链接起来