import os

from openai import OpenAI
import gradio as gr

# Read the API key from the environment and create the OpenAI client
api_key = os.environ.get('OPENAI_API_KEY')
client = OpenAI(api_key=api_key)

# Models offered in the dropdown; the first entry is the default
MODELS = [
    'gpt-4o',
    'gpt-4o-mini',
    'gpt-4',
    'gpt-4-turbo',
    'gpt-3.5-turbo',
]
def generate(message, history, model, temperature=1.0):
    # Convert Gradio's (user, assistant) tuple history into the message
    # format expected by the OpenAI chat completions API
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    # Request a streamed completion so the reply can be shown as it arrives
    response = client.chat.completions.create(model=model,
                                              messages=history_openai_format,
                                              temperature=temperature,
                                              stream=True)

    # Yield the accumulated reply after each chunk; Gradio re-renders the
    # chatbot with every yielded value
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
chat_interface = gr.ChatInterface(
    title='Private ChatGPT',
    description='Chat with OpenAI models using their official API. OpenAI <a href="https://platform.openai.com/docs/concepts">promises</a> not to train on input or output of API calls.',
    fn=generate,
    # Extra controls shown below the chat box; their values are passed
    # to generate() after (message, history)
    additional_inputs=[
        gr.Dropdown(label='model',
                    choices=MODELS,
                    value=MODELS[0],
                    allow_custom_value=True),
        gr.Slider(label="Temperature",
                  minimum=0.,
                  maximum=1.2,
                  step=0.05,
                  value=1.0),
    ],
    analytics_enabled=False,
    show_progress='full',
)

chat_interface.launch(share=True)
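
# --- Optional smoke test: a minimal sketch, not part of the original Space ---
# Exercises the streaming generator directly, without the Gradio UI. It
# assumes OPENAI_API_KEY is set; the prompt and model below are arbitrary
# choices for illustration. Run it with the launch(share=True) line above
# commented out, since launch() blocks the script.
reply = ""
for reply in generate("Say hello in one sentence.", [], 'gpt-4o-mini',
                      temperature=0.2):
    pass  # each iteration yields the reply accumulated so far
print(reply)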