mateoluksenberg committed on
Commit 09f0255 · verified · 1 Parent(s): b8e22ba

Update app.py

Files changed (1)
  1. app.py +5 -82
app.py CHANGED
@@ -273,95 +273,18 @@ def simple_chat(message, temperature: float = 0.8, max_length: int = 4096, top_p
 
     thread.join()
 
-    print("---------")
-    print("Text: ")
-    print(buffer)
-    print("---------")
-
-    # If the generated text is JSON, parse it and format it as plain text
     try:
         json_content = json.loads(buffer)
         formatted_text = "\n".join(f"{key}: {value}" for key, value in json_content.items())
     except json.JSONDecodeError:
-        # If the buffer is not valid JSON, return it as is
         formatted_text = buffer
 
-    return PlainTextResponse(formatted_text)
-
-
-
-
-# @spaces.GPU()
-# def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096, top_p: float = 1, top_k: int = 10, penalty: float = 1.0):
-#     try:
-#         model = AutoModelForCausalLM.from_pretrained(
-#             MODEL_ID,
-#             torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
-#             low_cpu_mem_usage=True,
-#             trust_remote_code=True
-#         )
-
-#         #tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
-
-#         conversation = []
-
-#         if "file_content" in message and message["file_content"]:
-#             file_content = message["file_content"]
-#             file_name = message["file_name"]
-
-#             with open(file_name, "wb") as f:
-#                 f.write(file_content.read())
-
-#             choice, contents = mode_load(file_name)
-
-#             if choice == "image":
-#                 conversation.append({"role": "user", "image": contents, "content": message['text']})
-#             elif choice == "doc":
-#                 message['text'] = contents + "\n\n\n" + "{} files uploaded.\n".format(1) + message['text']
-#                 conversation.append({"role": "user", "content": message['text']})
-#                 # format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format(1) + message['text']
-#                 # conversation.append({"role": "user", "content": format_msg})
-#             else:
-#                 conversation.append({"role": "user", "content": message['text']})
-
-#         input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
-
-#         streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
-
-#         generate_kwargs = dict(
-#             max_length=max_length,
-#             do_sample=True,
-#             top_p=top_p,
-#             top_k=top_k,
-#             temperature=temperature,
-#             repetition_penalty=penalty,
-#             eos_token_id=[151329, 151336, 151338],
-#         )
-
-#         gen_kwargs = {**input_ids, **generate_kwargs}
-
-#         for entry in conversation:
-#             print(f"Role: {entry['role']}, Content: {entry.get('content', '')}")
-
-#         with torch.no_grad():
-#             generated_ids = model.generate(input_ids['input_ids'], **generate_kwargs)
-#             generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
-
-#         text_original = message['text'].strip()
-#         generated_text_cleaned = generated_text.replace(text_original, "").strip()
-
-#         print(" ")
-#         print("---------")
-#         print("Text: ")
-#         print(" ")
-#         print(generated_text_cleaned)
-
-
-#         return PlainTextResponse(generated_text_cleaned)
-#     except Exception as e:
-#         return PlainTextResponse(f"Error: {str(e)}")
-
+    print("---------")
+    print("Text: ")
+    print(formatted_text)
+    print("---------")
 
+    return PlainTextResponse(formatted_text)
 
 
 @app.post("/chat/")
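Net effect of the hunk above: the debug prints now log the formatted text (after JSON parsing) instead of the raw buffer, and the long commented-out simple_chat variant is deleted. Below is a minimal, self-contained sketch of the post-processing logic this commit leaves in place, assuming FastAPI's PlainTextResponse as used elsewhere in app.py; the wrapper name format_buffer is illustrative only, since in app.py this code sits inline at the end of simple_chat.

import json

from fastapi.responses import PlainTextResponse


def format_buffer(buffer: str) -> PlainTextResponse:
    """Illustrative helper mirroring the tail of simple_chat after this commit."""
    # If the generated text is JSON, flatten it to "key: value" lines;
    # otherwise fall back to the raw buffer.
    try:
        json_content = json.loads(buffer)
        formatted_text = "\n".join(f"{key}: {value}" for key, value in json_content.items())
    except json.JSONDecodeError:
        formatted_text = buffer

    # Debug logging now happens after formatting, so it shows what is actually returned.
    print("---------")
    print("Text: ")
    print(formatted_text)
    print("---------")

    return PlainTextResponse(formatted_text)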