Spaces:
Runtime error
Runtime error
productizationlabs
committed on
Commit
•
a5f743e
1
Parent(s):
80f95c1
Upload app.py
Browse files
app.py
CHANGED
@@ -1,136 +1,41 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
import requests
|
5 |
-
|
6 |
-
|
7 |
-
try:
|
8 |
-
openai.api_key = os.environ["OPENAI_API_KEY"]
|
9 |
-
|
10 |
except KeyError:
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
top_p_chatgpt
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
messages=[]
|
47 |
-
for data in chatbot_chatgpt:
|
48 |
-
temp1 = {}
|
49 |
-
temp1["role"] = "user"
|
50 |
-
temp1["content"] = data[0]
|
51 |
-
temp2 = {}
|
52 |
-
temp2["role"] = "assistant"
|
53 |
-
temp2["content"] = data[1]
|
54 |
-
messages.append(temp1)
|
55 |
-
messages.append(temp2)
|
56 |
-
temp3 = {}
|
57 |
-
temp3["role"] = "user"
|
58 |
-
temp3["content"] = inputs
|
59 |
-
messages.append(temp3)
|
60 |
-
payload = {
|
61 |
-
"model": "gpt-3.5-turbo",
|
62 |
-
"messages": messages, #[{"role": "user", "content": f"{inputs}"}],
|
63 |
-
"temperature" : temperature_chatgpt, #1.0,
|
64 |
-
"top_p": top_p_chatgpt, #1.0,
|
65 |
-
"n" : 1,
|
66 |
-
"stream": True,
|
67 |
-
"presence_penalty":0,
|
68 |
-
"frequency_penalty":0,
|
69 |
-
}
|
70 |
-
|
71 |
-
chat_counter_chatgpt+=1
|
72 |
-
|
73 |
-
history.append("You asked: "+ inputs)
|
74 |
-
|
75 |
-
# make a POST request to the API endpoint using the requests.post method, passing in stream=True
|
76 |
-
response = requests.post(API_URL, headers=headers, json=payload, stream=True)
|
77 |
-
token_counter = 0
|
78 |
-
partial_words = ""
|
79 |
-
|
80 |
-
counter=0
|
81 |
-
for chunk in response.iter_lines():
|
82 |
-
#Skipping the first chunk
|
83 |
-
if counter == 0:
|
84 |
-
counter+=1
|
85 |
-
continue
|
86 |
-
# check whether each line is non-empty
|
87 |
-
if chunk.decode() :
|
88 |
-
chunk = chunk.decode()
|
89 |
-
# decode each line as response data is in bytes
|
90 |
-
if len(chunk) > 13 and "content" in json.loads(chunk[6:])['choices'][0]["delta"]:
|
91 |
-
partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
|
92 |
-
if token_counter == 0:
|
93 |
-
history.append(" " + partial_words)
|
94 |
-
else:
|
95 |
-
history[-1] = partial_words
|
96 |
-
chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
|
97 |
-
token_counter+=1
|
98 |
-
yield chat, history, chat_counter_chatgpt # this resembles {chatbot: chat, state: history}
|
99 |
-
|
100 |
-
|
101 |
-
def reset_textbox():
|
102 |
-
return gr.update(value="")
|
103 |
-
|
104 |
-
def reset_chat(chatbot, state):
|
105 |
-
return None, []
|
106 |
-
|
107 |
-
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
|
108 |
-
#chatgpt {height: 400px; overflow: auto;}} """, theme=gr.themes.Default(primary_hue="slate") ) as demo:
|
109 |
-
with gr.Row():
|
110 |
-
with gr.Column(scale=14):
|
111 |
-
with gr.Box():
|
112 |
-
with gr.Row():
|
113 |
-
with gr.Column(scale=13):
|
114 |
-
inputs = gr.Textbox(label="Ask me anything ⤵️ Try: Value of pi" )
|
115 |
-
with gr.Column(scale=1):
|
116 |
-
b1 = gr.Button('Submit', elem_id = 'submit').style(full_width=True)
|
117 |
-
b2 = gr.Button('Clear', elem_id = 'clear').style(full_width=True)
|
118 |
-
state_chatgpt = gr.State([])
|
119 |
-
|
120 |
-
with gr.Box():
|
121 |
-
with gr.Row():
|
122 |
-
chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='My ChatGPT Turbo')
|
123 |
-
chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)
|
124 |
-
|
125 |
-
|
126 |
-
inputs.submit(reset_textbox, [], [inputs])
|
127 |
-
|
128 |
-
b1.click( chatbot,
|
129 |
-
[ inputs, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
|
130 |
-
[chatbot_chatgpt, state_chatgpt],)
|
131 |
-
|
132 |
-
b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
|
133 |
-
|
134 |
-
demo.queue(concurrency_count=16).launch(height= 2500, debug=True)
|
135 |
-
|
136 |
-
|
|
|
1 |
+
# Minifier-style aliases kept because the rest of the file references them.
_C = 'role'
_B = True
_A = 'content'

import os
import gradio as gr
import json
import requests
import openai

try:
    # Credentials come from the environment; never hard-code the key.
    openai.api_key = os.environ['OPENAI_API_KEY']
except KeyError:
    error_message = 'System is at capacity right now.Please try again later'
    print(error_message)

    # Fallback stub shown when no API key is configured.
    # NOTE(review): a later `def chatbot(...)` in this file shadows this
    # stub unconditionally — confirm that is intended.
    def chatbot(input):
        return error_message
else:
    # Seed system message (not otherwise read in the visible code).
    messages = [{_C: 'system', _A: 'My AI Assistant'}]

# Chat Completions endpoint and default sampling parameters.
API_URL = 'https://api.openai.com/v1/chat/completions'
top_p_chatgpt = 1.0
temperature_chatgpt = 1.0
|
13 |
+
def chatbot(inputs, chat_counter_chatgpt, chatbot_chatgpt=None, history=None):
    """Stream a ChatGPT reply for *inputs*, yielding UI updates per token.

    Parameters
    ----------
    inputs : str
        The user's new message.
    chat_counter_chatgpt : int
        Requests already made this session; 0 means "first turn".
    chatbot_chatgpt : list | None
        Prior (user, assistant) pairs from the Chatbot widget.
    history : list | None
        Flat alternating user/assistant texts held in gr.State.

    Yields
    ------
    tuple
        (chat_pairs, history, chat_counter_chatgpt) for the Gradio outputs.
    """
    # Fix the mutable-default-argument bug: the original `=[]` defaults
    # shared one list object across every call.
    if chatbot_chatgpt is None:
        chatbot_chatgpt = []
    if history is None:
        history = []

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f"Bearer {openai.api_key}",
    }

    if chat_counter_chatgpt != 0:
        # Follow-up turn: replay the visible conversation for context.
        messages = []
        for user_text, assistant_text in chatbot_chatgpt:
            messages.append({'role': 'user', 'content': user_text})
            messages.append({'role': 'assistant', 'content': assistant_text})
        messages.append({'role': 'user', 'content': inputs})
        payload = {
            'model': 'gpt-3.5-turbo',
            'messages': messages,
            'temperature': temperature_chatgpt,
            'top_p': top_p_chatgpt,
            'n': 1,
            'stream': True,
            'presence_penalty': 0,
            'frequency_penalty': 0,
        }
    else:
        # First turn: single user message with fixed sampling (matches the
        # original, which applied the module-level temperature/top_p only
        # from the second turn onward).
        payload = {
            'model': 'gpt-3.5-turbo',
            'messages': [{'role': 'user', 'content': f"{inputs}"}],
            'temperature': 1.0,
            'top_p': 1.0,
            'n': 1,
            'stream': True,
            'presence_penalty': 0,
            'frequency_penalty': 0,
        }

    chat_counter_chatgpt += 1
    history.append('You asked: ' + inputs)

    # Server-sent-events stream: each data line looks like b"data: {json}".
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)

    token_counter = 0
    partial_words = ''
    counter = 0
    for chunk in response.iter_lines():
        if counter == 0:
            # Skip the very first chunk of the SSE stream.
            counter += 1
            continue
        decoded = chunk.decode()
        if not decoded:
            continue
        # Length guard skips short lines such as the terminal "data: [DONE]";
        # [6:] strips the "data: " prefix before JSON parsing.
        if len(decoded) > 13:
            # Parse once per chunk (the original parsed the same JSON twice).
            delta = json.loads(decoded[6:])['choices'][0]['delta']
            if 'content' in delta:
                partial_words += delta['content']
                if token_counter == 0:
                    history.append(' ' + partial_words)
                else:
                    history[-1] = partial_words
                # Pair up the flat history into (user, assistant) tuples.
                chat = [(history[i], history[i + 1])
                        for i in range(0, len(history) - 1, 2)]
                token_counter += 1
                yield chat, history, chat_counter_chatgpt
|
29 |
+
def reset_textbox():
    """Return a Gradio update that clears the input textbox."""
    return gr.update(value='')
|
30 |
+
def reset_chat(chatbot, state):
    """Clear the chat: blank the Chatbot widget and empty the history state.

    Both parameters are accepted (Gradio passes the current component
    values) but intentionally ignored.
    """
    cleared_widget = None
    cleared_history = []
    return cleared_widget, cleared_history
|
31 |
+
# Build the Gradio UI: a single column with the input row on top and the
# chat display below, plus hidden state for the running history/counter.
with gr.Blocks(css='#col_container {width: 1000px; margin-left: auto; margin-right: auto;}\n #chatgpt {height: 400px; overflow: auto;}} ', theme=gr.themes.Default(primary_hue='slate')) as ChatGPTTurbo:
    with gr.Row():
        with gr.Column(scale=14):
            with gr.Box():
                with gr.Row():
                    with gr.Column(scale=13):
                        inputs = gr.Textbox(label='Ask me anything ⤵️ Try: Value of pi')
                    with gr.Column(scale=1):
                        b1 = gr.Button('Submit', elem_id='submit').style(full_width=True)
                        b2 = gr.Button('Clear', elem_id='clear').style(full_width=True)
            # Hidden per-session conversation history.
            state_chatgpt = gr.State([])
            with gr.Box():
                with gr.Row():
                    chatbot_chatgpt = gr.Chatbot(elem_id='chatgpt', label='My ChatGPT Turbo')
    # Invisible request counter; chatbot() uses it to detect the first turn.
    chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)
    # Wire events: submitting clears the textbox; Submit streams a reply;
    # Clear resets both the widget and the stored history.
    inputs.submit(reset_textbox, [], [inputs])
    b1.click(chatbot,
             [inputs, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
             [chatbot_chatgpt, state_chatgpt])
    b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
    # NOTE(review): queue().launch() runs inside the Blocks context here,
    # mirroring the minified source — confirm that is intended.
    ChatGPTTurbo.queue(concurrency_count=16).launch(height=2500, debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|