mirror of
https://github.com/gradio-app/gradio.git
synced 2024-11-21 01:01:05 +08:00
Streaming example for the updated OpenAI API (#7508)
* Changes for updated OpenAI api * Update guides/04_chatbots/01_creating-a-chatbot-fast.md --------- Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
This commit is contained in:
parent
33f68cb6c2
commit
8f050eedbc
@ -198,10 +198,11 @@ gr.ChatInterface(predict).launch()
|
||||
Of course, we could also use the `openai` library directly. Here's a similar example, but this time with streaming results as well:
|
||||
|
||||
```python
|
||||
import openai
|
||||
from openai import OpenAI
|
||||
import gradio as gr
|
||||
|
||||
openai.api_key = "sk-..." # Replace with your key
|
||||
api_key = "sk-..." # Replace with your key
|
||||
client = OpenAI(api_key=api_key)
|
||||
|
||||
def predict(message, history):
|
||||
history_openai_format = []
|
||||
@ -209,19 +210,17 @@ def predict(message, history):
|
||||
history_openai_format.append({"role": "user", "content": human })
|
||||
history_openai_format.append({"role": "assistant", "content":assistant})
|
||||
history_openai_format.append({"role": "user", "content": message})
|
||||
|
||||
response = openai.ChatCompletion.create(
|
||||
model='gpt-3.5-turbo',
|
||||
messages= history_openai_format,
|
||||
temperature=1.0,
|
||||
stream=True
|
||||
)
|
||||
|
||||
response = client.chat.completions.create(model='gpt-3.5-turbo',
|
||||
messages= history_openai_format,
|
||||
temperature=1.0,
|
||||
stream=True)
|
||||
|
||||
partial_message = ""
|
||||
for chunk in response:
|
||||
if len(chunk['choices'][0]['delta']) != 0:
|
||||
partial_message = partial_message + chunk['choices'][0]['delta']['content']
|
||||
yield partial_message
|
||||
if chunk.choices[0].delta.content is not None:
|
||||
partial_message = partial_message + chunk.choices[0].delta.content
|
||||
yield partial_message
|
||||
|
||||
gr.ChatInterface(predict).launch()
|
||||
```
|
||||
|
Loading…
Reference in New Issue
Block a user