Spaces:
Running
Running
mateoluksenberg
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -228,9 +228,9 @@ def simple_chat(message, temperature: float = 0.8, max_length: int = 4096, top_p
|
|
228 |
with open(message['file_name'], 'wb') as temp_file:
|
229 |
temp_file.write(message['file_content'].read())
|
230 |
temp_file_path = temp_file.name
|
231 |
-
|
232 |
choice, contents = mode_load(temp_file_path)
|
233 |
-
|
234 |
if choice == "image":
|
235 |
conversation.append({"role": "user", "image": contents, "content": message['text']})
|
236 |
elif choice == "doc":
|
@@ -266,50 +266,22 @@ def simple_chat(message, temperature: float = 0.8, max_length: int = 4096, top_p
|
|
266 |
model.generate(**gen_kwargs)
|
267 |
for new_text in streamer:
|
268 |
buffer += new_text
|
269 |
-
# thread = Thread(target=model.generate, kwargs=gen_kwargs)
|
270 |
-
# thread.start()
|
271 |
-
# for new_text in streamer:
|
272 |
-
# buffer += new_text
|
273 |
|
274 |
-
|
275 |
thread = Thread(target=generate_text)
|
276 |
thread.start()
|
277 |
|
278 |
thread.join()
|
279 |
-
|
280 |
-
# # Start the generation in a separate thread
|
281 |
-
# generate_text()
|
282 |
-
|
283 |
-
# # Wait for the generation to finish
|
284 |
-
# thread.join()
|
285 |
|
286 |
print("---------")
|
287 |
print("Text: ")
|
288 |
print(buffer)
|
289 |
print("---------")
|
290 |
|
291 |
-
# with torch.no_grad():
|
292 |
-
# thread = Thread(target=model.generate, kwargs=gen_kwargs)
|
293 |
-
# thread.start()
|
294 |
-
# buffer = ""
|
295 |
-
# for new_text in streamer:
|
296 |
-
# buffer += new_text
|
297 |
-
# yield buffer
|
298 |
-
|
299 |
-
# print("---------")
|
300 |
-
# print("Text: ")
|
301 |
-
# print(buffer)
|
302 |
-
# print("---------")
|
303 |
-
|
304 |
-
# return PlainTextResponse(buffer)
|
305 |
-
|
306 |
return PlainTextResponse(buffer)
|
307 |
|
308 |
|
309 |
|
310 |
|
311 |
-
|
312 |
-
|
313 |
# @spaces.GPU()
|
314 |
# def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
|
315 |
# try:
|
|
|
228 |
with open(message['file_name'], 'wb') as temp_file:
|
229 |
temp_file.write(message['file_content'].read())
|
230 |
temp_file_path = temp_file.name
|
231 |
+
|
232 |
choice, contents = mode_load(temp_file_path)
|
233 |
+
|
234 |
if choice == "image":
|
235 |
conversation.append({"role": "user", "image": contents, "content": message['text']})
|
236 |
elif choice == "doc":
|
|
|
266 |
model.generate(**gen_kwargs)
|
267 |
for new_text in streamer:
|
268 |
buffer += new_text
|
|
|
|
|
|
|
|
|
269 |
|
|
|
270 |
thread = Thread(target=generate_text)
|
271 |
thread.start()
|
272 |
|
273 |
thread.join()
|
|
|
|
|
|
|
|
|
|
|
|
|
274 |
|
275 |
print("---------")
|
276 |
print("Text: ")
|
277 |
print(buffer)
|
278 |
print("---------")
|
279 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
280 |
return PlainTextResponse(buffer)
|
281 |
|
282 |
|
283 |
|
284 |
|
|
|
|
|
285 |
# @spaces.GPU()
|
286 |
# def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
|
287 |
# try:
|