Commit
•
b5b72b0
0
Parent(s):
Duplicate from yuntian-deng/ChatGPT
Browse files
Co-authored-by: Yuntian Deng <yuntian-deng@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +14 -0
- app.py +194 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Chat-with-GPT
|
3 |
+
emoji: 🚀
|
4 |
+
colorFrom: red
|
5 |
+
colorTo: indigo
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.21.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: true
|
10 |
+
license: mit
|
11 |
+
duplicated_from: yuntian-deng/ChatGPT
|
12 |
+
---
|
13 |
+
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import os
|
3 |
+
import sys
|
4 |
+
import json
|
5 |
+
import requests
|
6 |
+
|
7 |
+
MODEL = "gpt-3.5-turbo"  # OpenAI chat model sent in every request payload
API_URL = os.getenv("API_URL")  # chat-completions endpoint; None if unset (requests.post would then fail)
DISABLED = os.getenv("DISABLED") == 'True'  # kill switch: swaps the title for an outage banner and hides the submit button
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # bearer token for the Authorization header

# Worker count for the Gradio request queue (demo.queue(concurrency_count=...)).
# NOTE(review): int(os.getenv("NUM_THREADS")) raises TypeError when the env var
# is unset — presumably intentional fail-fast at startup; confirm.
NUM_THREADS = int(os.getenv("NUM_THREADS"))

print (NUM_THREADS)  # log the configured concurrency at startup
|
14 |
+
|
15 |
+
def exception_handler(exception_type, exception, traceback):
    """Compact excepthook: report an uncaught error as one 'Type: message' line."""
    print(f"{exception_type.__name__}: {exception}")

# Install the one-line hook and suppress Python's own traceback frames,
# so users of the Space never see multi-line stack traces in the logs.
sys.excepthook = exception_handler
sys.tracebacklimit = 0
|
19 |
+
|
20 |
+
#https://github.com/gradio-app/gradio/issues/3531#issuecomment-1484029099
def parse_codeblock(text):
    """Convert model output text into chat-safe HTML.

    Fenced ``` lines become <pre><code class="LANG"> / </code></pre> tags
    (the text after the opening ``` is used as the class name). Every other
    line after the first is prefixed with <br/> and has its < and > escaped
    so model output cannot inject markup into the chat window.

    Args:
        text: Raw (possibly multi-line) model output.

    Returns:
        A single HTML string with no newline characters.

    Note: the first line is never escaped — original behavior preserved.
    """
    lines = text.split("\n")
    for i, line in enumerate(lines):
        if "```" in line:
            if line != "```":
                # Opening fence: whatever follows ``` is the language tag.
                lines[i] = f'<pre><code class="{lines[i][3:]}">'
            else:
                lines[i] = '</code></pre>'
        else:
            if i > 0:
                # Fix: escape HTML metacharacters. The original replaces had
                # been reduced to no-ops (replace("<", "<")) when the file's
                # &lt;/&gt; entities were decoded during extraction.
                lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;")
    return "".join(lines)
|
33 |
+
|
34 |
+
def predict(inputs, top_p, temperature, chat_counter, chatbot, history, request: gr.Request):
    """Stream a chat completion from the OpenAI API into the Gradio chatbot.

    Generator used as a Gradio event handler. On every streamed token it yields:
    (chatbot message pairs, history list, chat_counter, raw HTTP response,
    textbox update, button update).

    Args:
        inputs: The user's new message text.
        top_p: Nucleus-sampling value from the UI slider (only used after the first turn).
        temperature: Sampling temperature from the UI slider (only used after the first turn).
        chat_counter: Number of completed turns; 0 means this is the first message.
        chatbot: Current Chatbot component value (received but not read in this body).
        history: Flat list alternating [user, assistant, user, assistant, ...] texts.
        request: Gradio request object; its client headers are forwarded to the API
            for logging (see the consent notice in the UI).
    """
    # First-turn payload: only the new message, with fixed sampling parameters.
    payload = {
        "model": MODEL,
        "messages": [{"role": "user", "content": f"{inputs}"}],
        "temperature" : 1.0,
        "top_p":1.0,
        "n" : 1,
        "stream": True,
        "presence_penalty":0,
        "frequency_penalty":0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}",
        # Forwards the client's request headers (IP, user agent, ...) for data
        # collection. NOTE(review): request.kwargs['headers'] relies on gradio
        # internals of this sdk version — verify on upgrade.
        "Headers": f"{request.kwargs['headers']}"
    }

    # print(f"chat_counter - {chat_counter}")
    if chat_counter != 0 :
        # Follow-up turn: rebuild the full conversation from `history`,
        # alternating roles (even index = user, odd index = assistant).
        messages = []
        for i, data in enumerate(history):
            if i % 2 == 0:
                role = 'user'
            else:
                role = 'assistant'
            message = {}
            message["role"] = role
            message["content"] = data
            messages.append(message)

        # Append the new user message last.
        message = {}
        message["role"] = "user"
        message["content"] = inputs
        messages.append(message)
        # Replace the default payload; only here do the UI sliders take effect.
        payload = {
            "model": MODEL,
            "messages": messages,
            "temperature" : temperature,
            "top_p": top_p,
            "n" : 1,
            "stream": True,
            "presence_penalty":0,
            "frequency_penalty":0,
        }

    chat_counter += 1

    history.append(inputs)
    token_counter = 0     # number of content deltas received so far
    partial_words = ""    # assistant reply accumulated across stream chunks
    counter = 0           # raw chunk counter, used only to skip the first line

    try:
        # make a POST request to the API endpoint using the requests.post method, passing in stream=True
        response = requests.post(API_URL, headers=headers, json=payload, stream=True)
        response_code = f"{response}"
        #if response_code.strip() != "<Response [200]>":
        #    #print(f"response code - {response}")
        #    raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")

        for chunk in response.iter_lines():
            #Skipping first chunk
            if counter == 0:
                counter += 1
                continue
            #counter+=1
            # check whether each line is non-empty
            if chunk.decode() :
                chunk = chunk.decode()
                # decode each line as response data is in bytes
                # SSE lines look like "data: {...}"; chunk[6:] strips the
                # "data: " prefix before JSON-decoding the delta.
                if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                    partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                    if token_counter == 0:
                        # First delta: start a new assistant entry in history.
                        history.append(" " + partial_words)
                    else:
                        # Later deltas: overwrite the in-progress assistant entry.
                        history[-1] = partial_words
                    token_counter += 1
                    # Keep inputs/button disabled while streaming.
                    yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=False), gr.update(interactive=False)  # resembles {chatbot: chat, state: history}
    except Exception as e:
        print (f'error found: {e}')
    # Final yield re-enables the textbox and button whether or not streaming
    # succeeded. NOTE(review): if requests.post itself raised, `response` is
    # unbound here and this yield raises NameError — confirm intended.
    yield [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2) ], history, chat_counter, response, gr.update(interactive=True), gr.update(interactive=True)
    # One structured log line per request for offline analysis.
    print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
|
117 |
+
|
118 |
+
|
119 |
+
def reset_textbox():
    """Clear the input box and disable both it and the submit button while a request runs."""
    cleared_input = gr.update(value='', interactive=False)
    disabled_button = gr.update(interactive=False)
    return cleared_input, disabled_button
|
121 |
+
|
122 |
+
# ---------- UI definition and event wiring ----------
title = """<h1 align="center">GPT-3.5 Chatbot</h1>"""
if DISABLED:
    # Kill switch: show an outage banner instead of the normal title.
    title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. We are currently requesting an increase in our quota. Please check back in a few days.</h1>"""
# NOTE(review): `description` is defined but never rendered anywhere in this file.
description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of a gpt-3.5 LLM.
"""

theme = gr.themes.Default(primary_hue="green")

with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""",
              theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">This app provides you full access to GPT-3.5 (4096 token limit). You don't need any OPENAI API key.</h1>""")
    #gr.HTML('''<center><a href="https://huggingface.co/spaces/yuntian-deng/ChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')

    # Main chat UI. Hidden until the user accepts the consent form below.
    with gr.Column(elem_id = "col_container", visible=False) as main_block:
        #API Key is provided by OpenAI
        #openai_api_key = gr.Textbox(type='password', label="Enter only your OpenAI API key here")
        chatbot = gr.Chatbot(elem_id='chatbot') #c
        inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
        state = gr.State([]) #s  -- flat [user, assistant, ...] history passed to predict
        with gr.Row():
            with gr.Column(scale=7):
                # Submit button; hidden entirely when the app is disabled.
                # NOTE(review): .style(full_width=True) is a gradio 3.x API.
                b1 = gr.Button(visible=not DISABLED).style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )

        #inputs, top_p, temperature, top_k, repetition_penalty
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
            chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Consent gate shown first; accepting it reveals main_block.
    with gr.Column(elem_id = "user_consent_container") as user_consent_block:
        # Get user consent
        # Hidden checkbox flipped by the JS confirm() dialog below.
        accept_checkbox = gr.Checkbox(visible=False)
        js = "(x) => confirm('By clicking \"OK\", I agree that my data may be published or shared.')"
        with gr.Accordion("User Consent for Data Collection, Use, and Sharing", open=True):
            gr.HTML("""
            <div>
                <p>By using our app, which is powered by OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:</p>
                <ol>
                    <li><strong>Collection:</strong> We may collect information, including the inputs you type into our app, the outputs generated by OpenAI's API, and certain technical details about your device and connection (such as browser type, operating system, and IP address) provided by your device's request headers.</li>
                    <li><strong>Use:</strong> We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications, and for security purposes, such as protecting against unauthorized access and attacks.</li>
                    <li><strong>Sharing and Publication:</strong> Your data, including the technical details collected from your device's request headers, may be published, shared with third parties, or used for analysis and reporting purposes.</li>
                    <li><strong>Data Retention:</strong> We may retain your data, including the technical details collected from your device's request headers, for as long as necessary.</li>
                </ol>
                <p>By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. If you do not agree with our data collection, use, and sharing practices, please do not use our app.</p>
            </div>
            """)
            accept_button = gr.Button("I Agree")

        # Swap visibility: hide the consent form, show the chat UI.
        def enable_inputs():
            return user_consent_block.update(visible=False), main_block.update(visible=True)

    # Clicking "I Agree" runs the browser confirm() (_js) whose boolean result
    # lands in accept_checkbox; the checkbox change then reveals the chat UI.
    accept_button.click(None, None, accept_checkbox, _js=js, queue=False)
    accept_checkbox.change(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block], queue=False)

    # Enter in the textbox and the button both: (1) clear/disable inputs,
    # then (2) stream the completion via predict (re-enables inputs at the end).
    inputs.submit(reset_textbox, [], [inputs, b1], queue=False)
    inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key
    b1.click(reset_textbox, [], [inputs, b1], queue=False)
    b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code, inputs, b1],) #openai_api_key

# Queue requests (NUM_THREADS concurrent workers) and start the server.
demo.queue(max_size=20, concurrency_count=NUM_THREADS, api_open=False).launch(share=False)
|