ysharma HF staff committed on
Commit
a7047db
0 Parent(s):

Duplicate from ysharma/OSChatbots_ChatGPT_ToeToToe

Browse files
Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +299 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: OSChatbots ChatGPT ToeToToe
3
+ emoji: 🏢
4
+ colorFrom: gray
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.20.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ duplicated_from: ysharma/OSChatbots_ChatGPT_ToeToToe
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,299 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import json
import requests
import os
from text_generation import Client, InferenceAPIClient

# Load pre-trained model and tokenizer - for THUDM model.
# trust_remote_code is required because ChatGLM ships custom modeling code on the Hub;
# .half().cuda() assumes a CUDA GPU is available — NOTE(review): will fail on CPU-only hosts.
from transformers import AutoModel, AutoTokenizer
tokenizer_glm = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model_glm = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model_glm = model_glm.eval()  # inference-only: disables dropout/batch-norm training behavior

# Load pre-trained model and tokenizer for the Chinese -> English translator
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
model_chtoen = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer_chtoen = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# Streaming endpoint for OpenAI ChatGPT (chat completions API)
API_URL = "https://api.openai.com/v1/chat/completions"
# Streaming endpoint for OpenChatKit, supplied via environment variable (may be unset)
API_URL_TGTHR = os.getenv('API_URL_TGTHR')

# Conversation prefix used to prime the OpenChatKit bot persona before any user turns.
openchat_preprompt = (
    "\n<human>: Hi!\n<bot>: My name is Bot, model version is 0.15, part of an open-source kit for "
    "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source "
    "community. I am not human, not evil and not alive, and thus have no thoughts and feelings, "
    "but I am programmed to be helpful, polite, honest, and friendly.\n")
28
+
29
+ #Predict function for CHATGPT
30
#Predict function for CHATGPT
def predict_chatgpt(inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt=None, history=None):
    """Stream a ChatGPT completion, yielding UI updates as tokens arrive.

    Parameters:
        inputs: the user's new message.
        top_p_chatgpt: nucleus-sampling value forwarded to the API.
        temperature_chatgpt: sampling temperature forwarded to the API.
        openai_api_key: bearer token for api.openai.com.
        chat_counter_chatgpt: number of completed exchanges so far.
        chatbot_chatgpt: list of (user, assistant) pairs shown in the UI.
        history: flat alternating [user, assistant, ...] transcript.

    Yields:
        (chat, history, chat_counter_chatgpt) — the pairs for the Chatbot
        component, the updated transcript state, and the bumped counter.
    """
    # Fix: the original used mutable default arguments ([]), which are shared
    # across calls and would leak conversation state between users/sessions.
    chatbot_chatgpt = [] if chatbot_chatgpt is None else chatbot_chatgpt
    history = [] if history is None else history

    # Rebuild the full message list so the model sees all prior turns.
    # When chatbot_chatgpt is empty this is just the single new user message.
    messages = []
    for user_msg, assistant_msg in chatbot_chatgpt:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": inputs})

    # Fix: the original hard-coded temperature/top_p to 1.0 on the very first
    # turn, ignoring the UI sliders; the slider values are now always used.
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "temperature": temperature_chatgpt,
        "top_p": top_p_chatgpt,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    chat_counter_chatgpt += 1
    history.append(inputs)

    # stream=True lets us render partial tokens as server-sent events arrive.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    token_counter = 0
    partial_words = ""

    for counter, chunk in enumerate(response.iter_lines()):
        # Skip the first SSE line: it carries no delta content.
        if counter == 0:
            continue
        decoded = chunk.decode()
        # iter_lines emits empty keep-alive lines between events; ignore them.
        if not decoded:
            continue
        # SSE payload lines look like "data: {...}"; strip the 6-char prefix.
        # The len > 13 guard also skips the terminal "data: [DONE]" sentinel.
        if len(decoded) > 13:
            delta = json.loads(decoded[6:])['choices'][0]["delta"]
            if "content" in delta:
                partial_words += delta["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                # Convert the flat transcript into (user, assistant) tuples.
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
                token_counter += 1
                yield chat, history, chat_counter_chatgpt  # resembles {chatbot: chat, state: history}
105
+
106
+
107
+ #Predict function for OPENCHATKIT
108
#Predict function for OPENCHATKIT
def predict_together(model: str,
                     inputs: str,
                     top_p: float,
                     temperature: float,
                     top_k: int,
                     repetition_penalty: float,
                     watermark: bool,
                     chatbot,
                     history,):
    """Stream a reply from the OpenChatKit text-generation endpoint.

    Rebuilds the "<human>:" / "<bot>:" formatted transcript from *chatbot*,
    prepends the persona preprompt, and yields ((user, bot) pairs, history)
    after every generated token.
    """
    client = Client(os.getenv("API_URL_TGTHR"))
    user_name, assistant_name = "<human>: ", "<bot>: "
    preprompt = openchat_preprompt

    history.append(inputs)

    # Re-serialize prior turns, making sure each side carries its role tag.
    past = []
    for data in chatbot:
        user_data, model_data = data

        if not user_data.startswith(user_name):
            user_data = user_name + user_data
        if not model_data.startswith("\n" + assistant_name):
            model_data = "\n" + assistant_name + model_data

        past.append(user_data + model_data.rstrip() + "\n")

    if not inputs.startswith(user_name):
        inputs = user_name + inputs

    # Prompt = persona + all prior turns + new turn + bot tag awaiting completion.
    total_inputs = preprompt + "".join(past) + inputs + "\n" + assistant_name.rstrip()

    # Stop sequences with trailing whitespace removed, e.g. "<human>:" / "<bot>:".
    stop_user = user_name.rstrip()
    stop_bot = assistant_name.rstrip()

    partial_words = ""

    for i, response in enumerate(client.generate_stream(
        total_inputs,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        watermark=watermark,
        temperature=temperature,
        max_new_tokens=500,
        stop_sequences=[stop_user, stop_bot],
    )):
        # Skip special tokens (EOS etc.) — they are not display text.
        if response.token.special:
            continue

        partial_words = partial_words + response.token.text
        # Fix: the original used str.rstrip(stop), which strips any trailing
        # characters from the *set* "<human>:" / "<bot>:" rather than removing
        # the exact suffix, so it could eat legitimate trailing text. Remove
        # the literal stop-sequence suffix instead.
        if partial_words.endswith(stop_user):
            partial_words = partial_words[:-len(stop_user)]
        if partial_words.endswith(stop_bot):
            partial_words = partial_words[:-len(stop_bot)]

        if i == 0:
            history.append(" " + partial_words)
        else:
            history[-1] = partial_words

        chat = [
            (history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)
        ]
        yield chat, history
175
+
176
+ # Define function to generate model predictions and update the history
177
# Define function to generate model predictions and update the history
def predict_glm(input, history=None):
    """Run one ChatGLM-6B turn and return the English-translated history twice.

    Returns the same list for both the Chatbot component and the gr.State.
    """
    # Fix: avoid the shared mutable-default-argument pitfall (history=[]).
    history = [] if history is None else history
    response, history = model_glm.chat(tokenizer_glm, input, history)
    # Translate each assistant reply from Chinese to English for display.
    history = [(query, translate_Chinese_English(reply)) for query, reply in history]
    return history, history
182
+
183
def translate_Chinese_English(chinese_text):
    """Translate *chinese_text* from Chinese (zh) to English via M2M100-418M."""
    # Tell the tokenizer the source language before encoding.
    tokenizer_chtoen.src_lang = "zh"
    encoded = tokenizer_chtoen(chinese_text, return_tensors="pt")
    # Forcing the English BOS token selects the target language.
    generated = model_chtoen.generate(
        **encoded, forced_bos_token_id=tokenizer_chtoen.get_lang_id("en")
    )
    decoded = tokenizer_chtoen.batch_decode(generated, skip_special_tokens=True)
    return decoded[0]
190
+
191
+ """
192
+ with gr.Blocks() as demo:
193
+ chatbot = gr.Chatbot()
194
+ state = gr.State([])
195
+
196
+ with gr.Row():
197
+ txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
198
+
199
+ txt.submit(predict, [txt, state], [chatbot, state])
200
+
201
+ demo.launch(debug=True)
202
+ """
203
+
204
def reset_textbox():
    """Return a gradio update that clears the shared input textbox."""
    cleared = gr.update(value="")
    return cleared
206
+
207
def reset_chat(chatbot, state):
    """Clear one chatbot pane: wipe the display (None) and empty its state.

    *chatbot* and *state* are ignored; gradio passes the current component
    values to every callback, so the parameters must be accepted.
    """
    return None, []
212
+
213
+
214
#title = """<h1 align="center">🔥🔥Comparison: ChatGPT & OpenChatKit </h1><br><h3 align="center">🚀A Gradio Streaming Demo</h3><br>Official Demo: <a href="https://huggingface.co/spaces/togethercomputer/OpenChatKit">OpenChatKit feedback app</a>"""
title = """<h1 align="center">🔥🔥Comparison: ChatGPT & Open Sourced CHatGLM-6B </h1><br><h3 align="center">🚀A Gradio Chatbot Demo</h3>"""
description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of multiple LLMs when prompted in similar ways.
"""

# Page layout: left column holds the API-key/input boxes plus the two chatbots
# side by side; right column holds the sampling-parameter widgets (the
# OpenChatKit ones are kept but hidden since that path is commented out).
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
                #chatgpt {height: 520px; overflow: auto;}
                #chatglm {height: 520px; overflow: auto;} """ ) as demo:
                #chattogether {height: 520px; overflow: auto;} """ ) as demo:
                #clear {width: 100px; height:50px; font-size:12px}""") as demo:
    gr.HTML(title)
    with gr.Row():
        with gr.Column(scale=14):
            with gr.Box():
                with gr.Row():
                    with gr.Column(scale=13):
                        # The user supplies their own OpenAI key; type='password' masks it.
                        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here for ChatGPT")
                        inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter ⤵️ " )
                    with gr.Column(scale=1):
                        b1 = gr.Button('🏃Run', elem_id = 'run').style(full_width=True)
                        b2 = gr.Button('🔄Clear up Chatbots!', elem_id = 'clear').style(full_width=True)
                # Per-model conversation state (flat [user, bot, ...] transcripts).
                state_chatgpt = gr.State([])
                #state_together = gr.State([])
                state_glm = gr.State([])

            with gr.Box():
                with gr.Row():
                    # The two chatbots rendered side by side for comparison.
                    chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='ChatGPT API - OPENAI')
                    #chatbot_together = gr.Chatbot(elem_id="chattogether", label='OpenChatKit - Text Generation')
                    chatbot_glm = gr.Chatbot(elem_id="chatglm", label='THUDM-ChatGLM6B')

        with gr.Column(scale=2, elem_id='parameters'):
            with gr.Box():
                # Hidden OpenChatKit sampling controls (visible=False throughout).
                gr.HTML("Parameters for #OpenCHAtKit", visible=False)
                top_p = gr.Slider(minimum=-0, maximum=1.0,value=0.25, step=0.05,interactive=True, label="Top-p", visible=False)
                temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.6, step=0.1, interactive=True, label="Temperature", visible=False)
                top_k = gr.Slider( minimum=1, maximum=50, value=50, step=1, interactive=True, label="Top-k", visible=False)
                repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.01, step=0.01, interactive=True, label="Repetition Penalty", visible=False)
                watermark = gr.Checkbox(value=True, label="Text watermarking", visible=False)
                model = gr.CheckboxGroup(value="Rallio67/joi2_20B_instruct_alpha",
                        choices=["togethercomputer/GPT-NeoXT-Chat-Base-20B", "Rallio67/joi2_20B_instruct_alpha", "google/flan-t5-xxl", "google/flan-ul2", "bigscience/bloomz", "EleutherAI/gpt-neox-20b",],
                        label="Model",visible=False,)
                temp_textbox_together = gr.Textbox(value=model.choices[0], visible=False)

            with gr.Box():
                gr.HTML("Parameters for OpenAI's ChatGPT")
                top_p_chatgpt = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p",)
                temperature_chatgpt = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
                # Hidden exchange counter; predict_chatgpt reads it to decide
                # whether to replay the prior conversation to the API.
                chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)

    # Event wiring: Enter in the textbox (or the Run button) fans the same
    # input out to both models; the textbox is cleared in parallel.
    inputs.submit(reset_textbox, [], [inputs])

    inputs.submit( predict_chatgpt,
                [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
                [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
    #inputs.submit( predict_together,
    #            [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
    #            [chatbot_together, state_together],)
    inputs.submit( predict_glm,
                [inputs, state_glm, ],
                [chatbot_glm, state_glm],)
    b1.click( predict_chatgpt,
                [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
                [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
    #b1.click( predict_together,
    #            [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
    #            [chatbot_together, state_together],)
    b1.click( predict_glm,
                [inputs, state_glm, ],
                [chatbot_glm, state_glm],)

    # Clearing resets both the visible chat and the stored transcript per model.
    b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
    #b2.click(reset_chat, [chatbot_together, state_together], [chatbot_together, state_together])
    b2.click(reset_chat, [chatbot_glm, state_glm], [chatbot_glm, state_glm])

    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/OpenChatKit_ChatGPT_Comparison?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    gr.Markdown(description)

# queue() is required for generator (streaming) callbacks; concurrency_count
# allows multiple simultaneous sessions.
demo.queue(concurrency_count=16).launch(height= 2500, debug=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ text_generation
2
+ protobuf>=3.19.5,<3.20.1
3
+ transformers>=4.26.1
4
+ icetk
5
+ cpm_kernels