import os
import json

import gradio as gr
import requests
import openai

try:
    openai.api_key = os.environ["OPENAI_API_KEY"]
except KeyError:
    error_message = "System is at capacity right now. Please try again later."
    print(error_message)

    # Fallback chatbot that simply returns the error message
    def chatbot(inputs):
        return error_message
else:
    # Seed message list with a system prompt
    messages = [
        {"role": "system", "content": "My AI Assistant"},
    ]
# Streaming endpoint for the OpenAI ChatGPT API
API_URL = "https://api.openai.com/v1/chat/completions"

# Sampling settings (1.0 matches the API default for both parameters)
top_p_chatgpt = 1.0
temperature_chatgpt = 1.0
# Predict function for ChatGPT
def chatbot(inputs, chat_counter_chatgpt, chatbot_chatgpt=[], history=[]):
    # Payload and headers for a first-turn call to the ChatGPT API
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": f"{inputs}"}],
        "temperature": 1.0,
        "top_p": 1.0,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai.api_key}",
    }

    # On follow-up turns, rebuild the full conversation by alternating
    # user/assistant roles from the (user, assistant) pairs in the chat widget
    if chat_counter_chatgpt != 0:
        messages = []
        for user_msg, assistant_msg in chatbot_chatgpt:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": inputs})

        payload = {
            "model": "gpt-3.5-turbo",
            "messages": messages,
            "temperature": temperature_chatgpt,
            "top_p": top_p_chatgpt,
            "n": 1,
            "stream": True,
            "presence_penalty": 0,
            "frequency_penalty": 0,
        }

    chat_counter_chatgpt += 1
    history.append("You asked: " + inputs)

    # POST to the API endpoint with stream=True so tokens arrive incrementally
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
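
    # What comes back is a server-sent-event stream; each non-empty line looks
    # roughly like this (a sketch of the wire format, not captured output):
    #
    #   data: {"choices":[{"delta":{"content":"Hello"},"index":0,"finish_reason":null}]}
    #
    # and the stream ends with "data: [DONE]". This is why the loop below slices
    # off the 6-character "data: " prefix before calling json.loads.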
    token_counter = 0
    partial_words = ""
    counter = 0

    for chunk in response.iter_lines():
        # Skip the first chunk
        if counter == 0:
            counter += 1
            continue
        # Decode each line (the response arrives as bytes) and skip empty keep-alives
        if chunk.decode():
            chunk = chunk.decode()
            # Strip the "data: " prefix, then pull the streamed token out of the delta
            if len(chunk) > 13 and "content" in json.loads(chunk[6:])["choices"][0]["delta"]:
                partial_words = partial_words + json.loads(chunk[6:])["choices"][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                # Convert the flat history into (user, bot) tuples for gr.Chatbot
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
                token_counter += 1
                yield chat, history, chat_counter_chatgpt  # resembles {chatbot: chat, state: history}
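
# For a second turn, the rebuilt payload above would look roughly like this
# (a sketch with invented message contents, not captured traffic):
#
#   {"model": "gpt-3.5-turbo",
#    "messages": [{"role": "user", "content": "Value of pi"},
#                 {"role": "assistant", "content": "Pi is approximately 3.14159."},
#                 {"role": "user", "content": "And the value of e?"}],
#    "temperature": 1.0, "top_p": 1.0, "n": 1, "stream": true,
#    "presence_penalty": 0, "frequency_penalty": 0}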
def reset_textbox():
    return gr.update(value="")


def reset_chat(chatbot, state):
    return None, []
with gr.Blocks(
    css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
           #chatgpt {height: 400px; overflow: auto;}""",
    theme=gr.themes.Default(primary_hue="slate"),
) as demo:
    with gr.Row():
        with gr.Column(scale=14):
            with gr.Box():
                with gr.Row():
                    with gr.Column(scale=13):
                        inputs = gr.Textbox(label="Ask me anything ⤵️ Try: Value of pi")
                    with gr.Column(scale=1):
                        b1 = gr.Button("Submit", elem_id="submit").style(full_width=True)
                        b2 = gr.Button("Clear", elem_id="clear").style(full_width=True)
            state_chatgpt = gr.State([])

            with gr.Box():
                with gr.Row():
                    chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label="")

    chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)

    # Enter clears the textbox; Submit streams the reply; Clear resets the chat
    inputs.submit(reset_textbox, [], [inputs])
    b1.click(
        chatbot,
        [inputs, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
        [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],
    )
    b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])

demo.queue(concurrency_count=16).launch(height=2500, debug=True)
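
# To run this Space locally (assuming a valid key is available):
#
#   export OPENAI_API_KEY=<your-key>
#   python app.py
#
# Note: gr.Box, Button.style(), and queue(concurrency_count=...) are Gradio 3.x
# APIs; running this under Gradio 4+ would require minor updates.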