Update app.py
app.py
CHANGED
@@ -1,105 +1,137 @@
Old version (removed lines prefixed with "-"):

import gradio as gr
import requests
-import json

-API_URL = "https://api.openai.com/v1/chat/completions"

-
-
-    payload = create_payload(messages, top_p, temperature)
-    response = make_request(API_URL, openai_api_key, payload)

-
-

-
-    if token_counter > 0:
-        chatbot.extend(new_chatbot)
-        history = new_history

-

-
-
-
-
-        user_message = {"role": "user", "content": chatbot[i][0]}
-        assistant_message = {"role": "assistant", "content": chatbot[i][1]}
-        messages.extend([user_message, assistant_message])
-    messages.append({"role": "user", "content": inputs})
-    return messages

-
-
        "model": "gpt-4-1106-preview",
-        "messages": messages,
-        "temperature": temperature,
-        "top_p": top_p,
-        "n": 1,
        "stream": True,
-        "presence_penalty":
-        "frequency_penalty":
-

-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-    response = requests.post(url, headers=headers, json=payload, stream=True)
-    return response

-
    token_counter = 0
    partial_words = ""
-    for chunk in response.iter_lines():
-        if chunk:
-            chunk_str = chunk.decode('utf-8').lstrip('data: ')
-            if chunk_str.strip() in ["[DONE]", "}"]:  # adds "}" to the check
-                break
-            try:
-                chunk_json = json.loads(chunk_str)
-                if 'choices' in chunk_json and len(chunk_json['choices']) > 0:
-                    chunk_data = chunk_json['choices'][0].get('delta', {})
-                    if 'content' in chunk_data:
-                        content = chunk_data['content']
-                        partial_words += content
-                        if token_counter == 0:
-                            history.append(" " + partial_words)
-                        else:
-                            history[-1] = partial_words
-                        token_counter += 1
-            except json.JSONDecodeError as e:
-                print("Error decoding JSON response:", e)
-                print("Raw chunk:", chunk_str)
-
-    chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
-    return chat, history, token_counter
-
-
-
-def setup_ui():
-    with gr.Blocks() as demo:
-        with gr.Column():
-            openai_api_key = gr.Textbox(type='password', label="Insira sua chave de API OpenAI aqui")
-            chatbot = gr.Chatbot()
-            inputs = gr.Textbox(placeholder="Olá!", label="Digite uma entrada e pressione Enter", lines=3)
-            state = gr.State([])
-            b1 = gr.Button(value="Executar", variant="primary")
-
-            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p")
-            temperature = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Temperature")
-            chat_counter = gr.Number(value=0, visible=False)
-
-            inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
-            b1.click(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
-
-    return demo
-
-def main():
-    demo = setup_ui()
-    demo.launch()
-
-if __name__ == "__main__":
-    main()

New version (added lines prefixed with "+"):

import gradio as gr
+import os
+import json
import requests


+#Streaming endpoint
+API_URL = "https://api.openai.com/v1/chat/completions"  #os.getenv("API_URL") + "/generate_stream"

+#Testing with my Open AI Key
+#OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

+def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):  #repetition_penalty, top_k

+    payload = {
+        "model": "gpt-4-1106-preview",
+        "messages": [{"role": "user", "content": f"{inputs}"}],
+        "temperature": 1.0,
+        "top_p": 1.0,
+        "n": 1,
+        "stream": True,
+        "presence_penalty": 0,
+        "frequency_penalty": 0,
+    }

+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {openai_api_key}"
+    }

+    print(f"chat_counter - {chat_counter}")
+    if chat_counter != 0:
+        messages = []
+        for data in chatbot:
+            temp1 = {}
+            temp1["role"] = "user"
+            temp1["content"] = data[0]
+            temp2 = {}
+            temp2["role"] = "assistant"
+            temp2["content"] = data[1]
+            messages.append(temp1)
+            messages.append(temp2)
+        temp3 = {}
+        temp3["role"] = "user"
+        temp3["content"] = inputs
+        messages.append(temp3)
+        #messages
+        payload = {
        "model": "gpt-4-1106-preview",
+        "messages": messages,  #[{"role": "user", "content": f"{inputs}"}],
+        "temperature": temperature,  #1.0,
+        "top_p": top_p,  #1.0,
+        "n": 1,
        "stream": True,
+        "presence_penalty": 0,
+        "frequency_penalty": 0,
+        }

+    chat_counter += 1

+    history.append(inputs)
+    print(f"payload is - {payload}")
+    # make a POST request to the API endpoint using the requests.post method, passing in stream=True
+    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
+    #response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    token_counter = 0
    partial_words = ""

+    counter = 0
+    for chunk in response.iter_lines():
+        #Skipping first chunk
+        if counter == 0:
+            counter += 1
+            continue
+        #counter+=1
+        # check whether each line is non-empty
+        if chunk.decode():
+            chunk = chunk.decode()
+            # decode each line as response data is in bytes
+            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
+                #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
+                #    break
+                partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
+                if token_counter == 0:
+                    history.append(" " + partial_words)
+                else:
+                    history[-1] = partial_words
+                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]  # convert to tuples of list
+                token_counter += 1
+                yield chat, history, chat_counter  # resembles {chatbot: chat, state: history}
+
+
+def reset_textbox():
+    return gr.update(value='')
+
+title = """<h1 align="center">🔥ChatGPT-4 Turbo API 🚀Streaming🚀</h1>"""
+description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
+```
+User: <utterance>
+Assistant: <utterance>
+User: <utterance>
+Assistant: <utterance>
+...
+```
+In this app, you can explore the outputs of a gpt-4-turbo LLM.
+"""
+
+css = """
+#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
+#chatbot {height: 520px; overflow: auto;}
+"""
+
+with gr.Blocks(css=css) as demo:
+    gr.HTML(title)
+    #gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPTwithAPI?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
+    with gr.Column(elem_id="col_container"):
+        openai_api_key = gr.Textbox(type='password', label="Insira sua chave de API OpenAI aqui")
+        chatbot = gr.Chatbot(elem_id="chatbot")
+        inputs = gr.Textbox(placeholder="Olá!", label="Digite uma entrada e pressione Enter", lines=3)
+        state = gr.State([])
+        b1 = gr.Button(value="Executar", variant="primary")
+
+        #inputs, top_p, temperature, top_k, repetition_penalty
+        with gr.Accordion("Parameters", open=False):
+            top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
+            temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
+            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
+            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
+            chat_counter = gr.Number(value=0, visible=False, precision=0)
+
+    inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
+    b1.click(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
+    b1.click(reset_textbox, [], [inputs])
+    inputs.submit(reset_textbox, [], [inputs])
+
+    #gr.Markdown(description)
+    demo.queue().launch(debug=True)
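
A note on the conversation handling: the temp1/temp2/temp3 block in the new predict() rebuilds the chat-completions message list from the Chatbot's (user, assistant) pairs and then appends the latest input, much as the removed user_message/assistant_message code did. A compact sketch of the same idea, for reference only (the helper name build_messages is hypothetical and not part of this Space):

def build_messages(chatbot_pairs, new_input):
    # Rebuild the chat-completions message list from the Chatbot's
    # (user, assistant) pairs, then append the newest user turn.
    messages = []
    for user_text, assistant_text in chatbot_pairs:
        messages.append({"role": "user", "content": user_text})
        messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": new_input})
    return messages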
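
On the streaming loop: the new code slices each line with chunk[6:] and skips the first chunk, which works for the current response format but is brittle. A minimal, stand-alone sketch that instead keys off the "data: " prefix and the "[DONE]" sentinel of the chat-completions SSE stream (the function stream_completion and its defaults are assumptions for illustration, not code from this commit):

import json
import requests

API_URL = "https://api.openai.com/v1/chat/completions"

def stream_completion(openai_api_key, messages, temperature=1.0, top_p=1.0):
    # Yield content deltas from the streaming chat-completions response.
    payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "stream": True,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    for line in response.iter_lines():
        if not line:
            continue  # skip SSE keep-alive blank lines
        line = line.decode("utf-8")
        if not line.startswith("data: "):
            continue  # only "data:" fields carry the JSON payload
        data = line[len("data: "):]
        if data.strip() == "[DONE]":
            break  # end-of-stream sentinel sent by the API
        delta = json.loads(data)["choices"][0].get("delta", {})
        if "content" in delta:
            yield delta["content"]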
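
On the UI wiring: predict() is a generator, so every `yield chat, history, chat_counter` pushes a fresh update to the [chatbot, state, chat_counter] outputs once demo.queue() is enabled; that is what produces the token-by-token streaming effect. A minimal sketch of that pattern with a stand-in handler (echo_stream is hypothetical and only simulates a streamed reply):

import time
import gradio as gr

def echo_stream(message, history):
    # Generator event handler: each yield refreshes the Chatbot and State
    # outputs, which is how predict() above streams partial answers.
    history = history + [[message, ""]]
    for ch in message:
        history[-1][1] += ch
        time.sleep(0.05)  # simulate tokens arriving over time
        yield history, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    box = gr.Textbox(placeholder="Type and press Enter")
    box.submit(echo_stream, [box, state], [chatbot, state])

if __name__ == "__main__":
    demo.queue().launch()  # queue() is required for generator-based streaming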