cutechicken committed on
Commit
e468070
•
1 Parent(s): 0126cba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -61
app.py CHANGED
@@ -281,67 +281,6 @@ def init_msg():
281
  return "파일을 분석하고 있습니다..."
282
 
283
 
284
-
285
- @spaces.GPU
286
- def stream_chat(message: str, history: list, uploaded_file, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
287
- try:
288
- print(f'message is - {message}')
289
- print(f'history is - {history}')
290
-
291
- # 파일 업로드 처리
292
- file_context = ""
293
- if uploaded_file:
294
- content, file_type = read_uploaded_file(uploaded_file)
295
- if content:
296
- file_context = f"\n\n업로드된 파일 내용:\n```\n{content}\n```"
297
-
298
- # ๊ด€๋ จ ์ปจํ…์ŠคํŠธ ์ฐพ๊ธฐ
299
- relevant_contexts = find_relevant_context(message)
300
- wiki_context = "\n\n관련 위키피디아 정보:\n"
301
- for ctx in relevant_contexts:
302
- wiki_context += f"Q: {ctx['question']}\nA: {ctx['answer']}\n유사도: {ctx['similarity']:.3f}\n\n"
303
-
304
- # ๋Œ€ํ™” ํžˆ์Šคํ† ๋ฆฌ ๊ตฌ์„ฑ
305
- conversation = []
306
- for prompt, answer in history:
307
- conversation.extend([
308
- {"role": "user", "content": prompt},
309
- {"role": "assistant", "content": answer}
310
- ])
311
-
312
- # ์ตœ์ข… ํ”„๋กฌํ”„ํŠธ ๊ตฌ์„ฑ
313
- final_message = file_context + wiki_context + "\nํ˜„์žฌ ์งˆ๋ฌธ: " + message
314
- conversation.append({"role": "user", "content": final_message})
315
-
316
- input_ids = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
317
- inputs = tokenizer(input_ids, return_tensors="pt").to(0)
318
-
319
- streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
320
-
321
- generate_kwargs = dict(
322
- inputs,
323
- streamer=streamer,
324
- top_k=top_k,
325
- top_p=top_p,
326
- repetition_penalty=penalty,
327
- max_new_tokens=max_new_tokens,
328
- do_sample=True,
329
- temperature=temperature,
330
- eos_token_id=[255001],
331
- )
332
-
333
- thread = Thread(target=model.generate, kwargs=generate_kwargs)
334
- thread.start()
335
-
336
- buffer = ""
337
- for new_text in streamer:
338
- buffer += new_text
339
- yield "", history + [[message, buffer]]
340
-
341
- except Exception as e:
342
- error_message = f"오류가 발생했습니다: {str(e)}"
343
- yield "", history + [[message, error_message]]
344
-
345
  CSS = """
346
  /* 3D 스타일 CSS */
347
  :root {
@@ -518,6 +457,80 @@ body {
518
  }
519
  """
520
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
521
  def create_demo():
522
  with gr.Blocks(css=CSS) as demo:
523
  with gr.Column():
@@ -554,6 +567,23 @@ def create_demo():
554
  elem_classes="send-button custom-button",
555
  scale=1
556
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
557
 
558
  with gr.Accordion("🎮 고급 설정", open=False):
559
  with gr.Row():
@@ -580,6 +610,9 @@ def create_demo():
580
  label="๋ฐ˜๋ณต ์–ต์ œ ๐Ÿ”„"
581
  )
582
 
 
 
 
583
  # Examples ์œ„์น˜ ์ˆ˜์ •
584
  gr.Examples(
585
  examples=[
 
281
  return "ํŒŒ์ผ์„ ๋ถ„์„ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค..."
282
 
283
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
284
  CSS = """
285
  /* 3D ์Šคํƒ€์ผ CSS */
286
  :root {
 
457
  }
458
  """
459
 
460
+ @spaces.GPU
461
+ def stream_chat(message: str, history: list, uploaded_file, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
462
+ try:
463
+ print(f'message is - {message}')
464
+ print(f'history is - {history}')
465
+
466
+ # ํŒŒ์ผ ์—…๋กœ๋“œ ์ฒ˜๋ฆฌ
467
+ file_context = ""
468
+ if uploaded_file and message == "ํŒŒ์ผ์„ ๋ถ„์„ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค...":
469
+ try:
470
+ content, file_type = read_uploaded_file(uploaded_file)
471
+ if content:
472
+ file_analysis = analyze_file_content(content, file_type)
473
+ file_context = f"\n\n📄 파일 분석 결과:\n{file_analysis}\n\n파일 내용:\n```\n{content}\n```"
474
+ message = "업로드된 파일을 분석해주세요."
475
+ except Exception as e:
476
+ print(f"파일 분석 오류: {str(e)}")
477
+ file_context = f"\n\n❌ 파일 분석 중 오류가 발생했습니다: {str(e)}"
478
+
479
+ # ๊ด€๋ จ ์ปจํ…์ŠคํŠธ ์ฐพ๊ธฐ
480
+ try:
481
+ relevant_contexts = find_relevant_context(message)
482
+ wiki_context = "\n\n๊ด€๋ จ ์œ„ํ‚คํ”ผ๋””์•„ ์ •๋ณด:\n"
483
+ for ctx in relevant_contexts:
484
+ wiki_context += f"Q: {ctx['question']}\nA: {ctx['answer']}\n์œ ์‚ฌ๋„: {ctx['similarity']:.3f}\n\n"
485
+ except Exception as e:
486
+ print(f"컨텍스트 검색 오류: {str(e)}")
487
+ wiki_context = ""
488
+
489
+ # ๋Œ€ํ™” ํžˆ์Šคํ† ๋ฆฌ ๊ตฌ์„ฑ
490
+ conversation = []
491
+ for prompt, answer in history:
492
+ conversation.extend([
493
+ {"role": "user", "content": prompt},
494
+ {"role": "assistant", "content": answer}
495
+ ])
496
+
497
+ # ์ตœ์ข… ํ”„๋กฌํ”„ํŠธ ๊ตฌ์„ฑ
498
+ final_message = file_context + wiki_context + "\nํ˜„์žฌ ์งˆ๋ฌธ: " + message
499
+ conversation.append({"role": "user", "content": final_message})
500
+
501
+ # ํ† ํฌ๋‚˜์ด์ € ์„ค์ •
502
+ input_ids = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
503
+ inputs = tokenizer(input_ids, return_tensors="pt").to(0)
504
+
505
+ streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
506
+
507
+ generate_kwargs = dict(
508
+ inputs,
509
+ streamer=streamer,
510
+ top_k=top_k,
511
+ top_p=top_p,
512
+ repetition_penalty=penalty,
513
+ max_new_tokens=max_new_tokens,
514
+ do_sample=True,
515
+ temperature=temperature,
516
+ eos_token_id=[255001],
517
+ )
518
+
519
+ thread = Thread(target=model.generate, kwargs=generate_kwargs)
520
+ thread.start()
521
+
522
+ buffer = ""
523
+ for new_text in streamer:
524
+ buffer += new_text
525
+ yield "", history + [[message, buffer]]
526
+
527
+ except Exception as e:
528
+ error_message = f"์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ–ˆ์Šต๋‹ˆ๋‹ค: {str(e)}"
529
+ print(f"Stream chat ์˜ค๋ฅ˜: {error_message}")
530
+ yield "", history + [[message, error_message]]
531
+
532
+
533
+
534
  def create_demo():
535
  with gr.Blocks(css=CSS) as demo:
536
  with gr.Column():
 
567
  elem_classes="send-button custom-button",
568
  scale=1
569
  )
570
+
571
+ # 이벤트 바인딩 수정
572
+ def init_msg():
573
+ return "ํŒŒ์ผ์„ ๋ถ„์„ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค..."
574
+
575
+ file_upload.change(
576
+ fn=init_msg,
577
+ outputs=msg,
578
+ queue=False
579
+ ).then(
580
+ fn=stream_chat,
581
+ inputs=[msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty],
582
+ outputs=[msg, chatbot],
583
+ queue=True
584
+ )
585
+
586
+
587
 
588
  with gr.Accordion("๐ŸŽฎ ๊ณ ๊ธ‰ ์„ค์ •", open=False):
589
  with gr.Row():
 
610
  label="๋ฐ˜๋ณต ์–ต์ œ ๐Ÿ”„"
611
  )
612
 
613
+
614
+
615
+
616
  # Examples ์œ„์น˜ ์ˆ˜์ •
617
  gr.Examples(
618
  examples=[