Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,780 Bytes
44c4d91 c35600d 44c4d91 8b1f0bb 44c4d91 8b1f0bb 24d20df 44c4d91 24d20df 45afa26 44c4d91 a89fdf4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
import os
from openai import OpenAI
import gradio as gr
# Read the API key from the environment so it never lives in source control.
api_key = os.environ.get('OPENAI_API_KEY')
# Single shared OpenAI client, reused for every chat request below.
client = OpenAI(api_key=api_key)
# Chat models selectable in the UI dropdown; the first entry is the default.
MODELS = [
    'gpt-4o',
    'gpt-4o-mini',
    'gpt-4',
    'gpt-4-turbo',
    'gpt-3.5-turbo',
]
def generate(message, history, model, temperature=1.0):
    """Stream an OpenAI chat completion for *message* given the chat *history*.

    Args:
        message: Latest user message (str).
        history: Prior conversation turns. Accepts both Gradio history
            formats: ``(user, assistant)`` tuples/lists (legacy default)
            and OpenAI-style ``{"role": ..., "content": ...}`` dicts
            (``type="messages"``). The original tuple-only unpacking
            raised ``ValueError`` on dict-format history.
        model: Name of the OpenAI chat model to call.
        temperature: Sampling temperature forwarded to the API.

    Yields:
        str: The assistant reply accumulated so far; each yield extends
        the previous one, which Gradio renders as a streaming response.
    """
    messages = []
    for turn in history:
        if isinstance(turn, dict):
            # Already OpenAI-shaped; keep only the fields the API expects.
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            human, assistant = turn
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        stream=True,
    )
    partial_message = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        # The final stream chunk carries content=None; skip it.
        if delta is not None:
            partial_message += delta
            yield partial_message
# --- UI definition: a streaming chat front-end over the OpenAI API ---

# Extra controls shown below the chat box; passed to `generate` on each call.
model_picker = gr.Dropdown(
    label='model',
    choices=MODELS,
    value=MODELS[0],
    allow_custom_value=True,
)
temperature_slider = gr.Slider(
    label="Temperature",
    minimum=0.,
    maximum=1.2,
    step=0.05,
    value=1.0,
)

chat_interface = gr.ChatInterface(
    fn=generate,
    title='Private ChatGPT',
    description='Chat with OpenAI models using their official API. OpenAI <a href="https://platform.openai.com/docs/concepts">promises</a> not to train on input or output of API calls.',
    additional_inputs=[model_picker, temperature_slider],
    analytics_enabled=False,
    show_progress='full',
)

chat_interface.launch(share=True)
|