gpt-3.5-turbo
Official docs: Chat Completions - OpenAI API
Official API docs: API Reference - OpenAI API
For a ChatGPT Plus upgrade tutorial you can contact me via the WeChat official account: 真域宝库
Tutorial link: Plus tutorial
Calling the API directly
Send a POST request to the API endpoint (or a proxy server)
```python
import requests
import json
import time

url = "https://api.openai.com/v1/chat/completions"

payload = json.dumps({
    "model": "gpt-3.5-turbo",
    "messages": [
        {
            "role": "system",
            "content": "You are a development assistant"
        },
        {
            "role": "user",
            "content": "Write a bs4 example in Python"
        }
    ]
})
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer sk-xxxxxxxxxxxxxxx'  # your API key (required)
}

startTime = time.time()
response = requests.request("POST", url, headers=headers, data=payload)
res = json.loads(response.text)
endTime = time.time()
print("Response time:", endTime - startTime)
print(res["choices"][0]["message"]["content"])
```
- system: sets the domain / persona for the model
- user: the question being asked
- assistant: can be used to carry previous responses (conversation context)
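As a small illustrative sketch (the conversation content below is invented, not from the original), this is how the three roles typically sit together in one `messages` array, with the assistant entry carrying the previous answer:

```python
# Hypothetical multi-turn history: the "assistant" entry stores the prior reply
# so the model can resolve follow-up questions in context.
messages = [
    {"role": "system", "content": "You are a development assistant"},                    # defines the domain
    {"role": "user", "content": "What is BeautifulSoup used for?"},                      # first question
    {"role": "assistant", "content": "BeautifulSoup parses HTML and XML in Python."},    # earlier reply
    {"role": "user", "content": "Show a short example of using it."}                     # follow-up question
]
```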
Response / reply
```json
{
  "id": "chatcmpl-6uuAwBAey1ov1SyWVD8LQYew1WwEt",
  "object": "chat.completion",
  "created": 1679020326,
  "model": "gpt-3.5-turbo-0301",
  "usage": {
    "prompt_tokens": 29,
    "completion_tokens": 188,
    "total_tokens": 217
  },
  "choices": [
    {
      "message": {
        "role": "assistant",
        "content": "OK, here is an example that uses the BeautifulSoup4 library to parse HTML:\n\n```python\nfrom bs4 import BeautifulSoup\nimport requests\n\n# Fetch the page content\nurl = 'https://www.example.com'\nresponse = requests.get(url)\nhtml = response.text\n\n# Parse the HTML\nsoup = BeautifulSoup(html, 'html.parser')\n\n# Find all links\nlinks = soup.find_all('a')\nfor link in links:\n    print(link.get('href'))\n\n# Find all headings\ntitles = soup.find_all('h2')\nfor title in titles:\n    print(title.text.strip())\n```\n\nThe code above first fetches a web page with the `requests` library, then parses the HTML with `BeautifulSoup`. The `find_all` method finds all links and headings, which are printed in a loop."
      },
      "finish_reason": "stop",
      "index": 0
    }
  ]
}
```
Get the reply: `res["choices"][0]["message"]["content"]`
Every response will include a `finish_reason`. The possible values for `finish_reason` are:
- stop: API returned complete model output
- length: Incomplete model output due to max_tokens parameter or token limit
- content_filter: Omitted content due to a flag from our content filters
- null: API response still in progress or incomplete
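A minimal sketch of how you might branch on `finish_reason` before trusting the reply (it reuses the `res` dictionary parsed in the example above):

```python
choice = res["choices"][0]

if choice["finish_reason"] == "stop":
    # Complete answer
    print(choice["message"]["content"])
elif choice["finish_reason"] == "length":
    # Truncated by max_tokens or the context limit: usable but incomplete
    print("[truncated]", choice["message"]["content"])
elif choice["finish_reason"] == "content_filter":
    # Part of the output was removed by the content filter
    print("Content was filtered.")
else:
    # null: response still in progress / incomplete (mainly seen with streaming)
    print("Response not finished yet.")
```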
Continuous conversation
```python
import requests
import json
import time

time.sleep(10)
assistantContent = ""

while True:
    url = "https://api.openai.com/v1/chat/completions"
    print("====================\n")
    userContent = input("[Question]: ")
    payload = json.dumps({
        "model": "gpt-3.5-turbo",
        "messages": [
            {
                "role": "system",
                "content": "You are a professor specializing in China's postgraduate entrance examination"
            },
            {
                "role": "assistant",
                "content": assistantContent
            },
            {
                "role": "user",
                "content": userContent
            }
        ]
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer sk-xxxxxxxxxxxxxxx'  # API key
    }
    startTime = time.time()
    print("Thinking...")
    response = requests.request("POST", url, headers=headers, data=payload)
    res = json.loads(response.text)
    endTime = time.time()
    print("Response time:", endTime - startTime)
    msg = res["choices"][0]["message"]["content"]
    assistantContent = msg  # carry the latest answer into the next turn
    print("====================\n", msg)
```
Calling the API with the official Python package
Official docs: platform.openai.com/docs/api-re…
```python
import openai

# List available models
def get_model_list(api_key: str) -> dict:
    res = openai.Model.list(api_key)
    for data in res["data"]:
        print("root:{}".format(data['root']))
    return res

if __name__ == '__main__':
    openai.api_base = "https://api.openai.com/v1"
    openai.api_key = "sk-xxxxxxxxxxx"
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a developer"},
            {"role": "user", "content": "Hello!"}
        ]
    )
    print(completion.choices[0].message)

    # Test run
    get_model_list(openai.api_key)
```
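Hard-coding `sk-...` keys in source files is easy to leak. A small sketch (my own convention, not part of the original) that reads the key from an environment variable instead:

```python
import os
import openai

# Assumes OPENAI_API_KEY (and optionally OPENAI_API_BASE) are exported in the shell.
openai.api_key = os.environ["OPENAI_API_KEY"]
openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")

completion = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(completion.choices[0].message["content"])
```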
Streaming mode example
Tokens are printed one by one as they arrive; the benefit is that the response feels fast, instead of waiting 10+ seconds for the whole answer to appear at once.
Official docs: openai-cookbook/How_to_stream_completions.ipynb at main · openai/openai-cookbook · GitHub
```python
import openai
import time

openai.api_base = "https://api.openai.com/v1"
openai.api_key = "sk-xxxxxxxxx"

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {'role': 'system', 'content': "You are a developer"},  # give GPT a role; optional
        {'role': 'user', 'content': "How do I write a login/authentication API with Django?"}  # the question
    ],
    temperature=0,
    stream=True
)

collected_chunks = []
collected_messages = []
print("start response:")
for chunk in response:
    time.sleep(0.1)
    message = chunk["choices"][0]["delta"].get("content", "")
    print(message, end="", flush=True)  # print each piece as it arrives
    collected_chunks.append(chunk)
    chunk_message = chunk["choices"][0]["delta"]
    collected_messages.append(chunk_message)

print("========")
full_reply_content = ''.join([m.get("content", "") for m in collected_messages])
print(full_reply_content)
```
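To see why streaming feels faster, here is a rough sketch (my own helper, assuming `openai.api_key` and `api_base` are configured as above) that measures time-to-first-token versus total time:

```python
import time
import openai

def timed_stream(question: str) -> None:
    start = time.time()
    first_token_at = None
    pieces = []
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": question}],
        stream=True
    )
    for chunk in response:
        content = chunk["choices"][0]["delta"].get("content", "")
        if content and first_token_at is None:
            first_token_at = time.time()  # first visible output
        pieces.append(content)
    end = time.time()
    if first_token_at is not None:
        print(f"First token after {first_token_at - start:.2f}s, full reply after {end - start:.2f}s")
    print("".join(pieces))
```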
Streaming mode loop example
```python
import openai
import time

def ai(question: str):
    openai.api_base = "https://api.openai.com/v1"
    openai.api_key = "sk-xxxxxxxxxxxx"
    model = "gpt-3.5-turbo"
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {'role': 'system', 'content': "You are a developer"},  # give GPT a role; optional
            {'role': 'user', 'content': question}                  # the question
        ],
        temperature=0,
        stream=True
    )
    collected_chunks = []
    collected_messages = []
    print(f"OpenAI({model}) : ", end="")
    for chunk in response:
        time.sleep(0.1)
        message = chunk["choices"][0]["delta"].get("content", "")
        print(message, end="", flush=True)
        collected_chunks.append(chunk)
        chunk_message = chunk["choices"][0]["delta"]
        collected_messages.append(chunk_message)
    print()  # end the streamed line
    # full_reply_content = ''.join([m.get("content", "") for m in collected_messages])
    # print(full_reply_content)

if __name__ == '__main__':
    while True:
        question = input("[Question]: ")
        startTime = time.time()
        # Send the request
        ai(question)
        print("Elapsed:", time.time() - startTime)
```
Open-source template deployment
Official documentation:
- ChatGPT page
- OpenAI official documentation
Upgrade to Plus:
- Join WaitList
- GPT-4 API waitlist (openai.com)