cutechicken committed on
Commit 2db4e16 • 1 Parent(s): 5416ad2

Update app.py

Files changed (1)
  1. app.py +38 -45
app.py CHANGED
@@ -1,78 +1,71 @@
 import os
 from dotenv import load_dotenv
 import gradio as gr
-from huggingface_hub import InferenceClient
 import pandas as pd
 import json
 from datetime import datetime
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Environment variable setup
 HF_TOKEN = os.getenv("HF_TOKEN")
 MODEL_ID = "CohereForAI/c4ai-command-r7b-12-2024"
 
-from transformers import pipeline
-
 class ModelManager:
     def __init__(self):
-        self.pipe = None
-        self.setup_pipeline()
+        self.tokenizer = None
+        self.model = None
+        self.setup_model()
 
-    def setup_pipeline(self):
+    def setup_model(self):
         try:
-            print("Starting pipeline initialization...")
-            self.pipe = pipeline(
-                "text-generation",
-                model=MODEL_ID,
+            print("Starting tokenizer load...")
+            self.tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
+            print("Tokenizer loaded")
+
+            print("Starting model load...")
+            self.model = AutoModelForCausalLM.from_pretrained(
+                MODEL_ID,
                 token=HF_TOKEN,
-                device_map="auto",
-                torch_dtype=torch.float16
+                torch_dtype=torch.float16,
+                device_map="auto"
             )
-            print("Pipeline initialization complete")
+            print("Model loaded")
         except Exception as e:
-            print(f"Error during pipeline initialization: {e}")
-            raise Exception(f"Pipeline initialization failed: {e}")
+            print(f"Error during model loading: {e}")
+            raise Exception(f"Model loading failed: {e}")
 
     def generate_response(self, messages, max_tokens=4000, temperature=0.7, top_p=0.9):
         try:
-            # Convert the message format
-            prompt = ""
-            for msg in messages:
-                role = msg["role"]
-                content = msg["content"]
-                if role == "system":
-                    prompt += f"System: {content}\n"
-                elif role == "user":
-                    prompt += f"User: {content}\n"
-                elif role == "assistant":
-                    prompt += f"Assistant: {content}\n"
-
-            # Generate the response
-            response = self.pipe(
-                prompt,
+            # Apply the chat template
+            input_ids = self.tokenizer.apply_chat_template(
+                messages,
+                tokenize=True,
+                add_generation_prompt=True,
+                return_tensors="pt"
+            ).to(self.model.device)
+
+            # Generate tokens
+            gen_tokens = self.model.generate(
+                input_ids,
                 max_new_tokens=max_tokens,
+                do_sample=True,
                 temperature=temperature,
                 top_p=top_p,
-                do_sample=True,
-                num_return_sequences=1,
-                pad_token_id=self.pipe.tokenizer.eos_token_id
+                pad_token_id=self.tokenizer.eos_token_id,
+                streamer=TextIteratorStreamer(self.tokenizer, skip_special_tokens=True)
             )
-
-            # Extract the response text and simulate streaming
-            generated_text = response[0]['generated_text'][len(prompt):].strip()
-            words = generated_text.split()
-
-            # Stream word by word
-            partial_response = ""
-            for word in words:
-                partial_response += word + " "
+
+            # Decode the response and stream it
+            response_text = ""
+            for new_text in self.tokenizer.decode(gen_tokens[0], skip_special_tokens=True):
+                response_text += new_text
                 yield type('Response', (), {
                     'choices': [type('Choice', (), {
-                        'delta': {'content': word + " "}
+                        'delta': {'content': new_text}
                     })()]
                 })()
-
+
         except Exception as e:
             raise Exception(f"Response generation failed: {e}")
 
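A note on the streaming path: as committed, generate_response references TextIteratorStreamer without importing it, and model.generate blocks until generation finishes, so the loop that follows walks character by character over the fully decoded output (prompt tokens included) rather than streaming new tokens. A minimal sketch of the threaded-streamer pattern that transformers documents for incremental decoding, assuming the same ModelManager fields and the same yield shape:

from threading import Thread

from transformers import TextIteratorStreamer

def generate_response(self, messages, max_tokens=4000, temperature=0.7, top_p=0.9):
    # Tokenize the conversation with the model's chat template.
    input_ids = self.tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(self.model.device)

    # skip_prompt=True keeps the echoed input out of the stream.
    streamer = TextIteratorStreamer(
        self.tokenizer, skip_prompt=True, skip_special_tokens=True
    )

    # generate() blocks, so it runs in a background thread while this
    # generator consumes decoded text chunks as they arrive.
    thread = Thread(
        target=self.model.generate,
        kwargs=dict(
            input_ids=input_ids,
            max_new_tokens=max_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=self.tokenizer.eos_token_id,
            streamer=streamer,
        ),
    )
    thread.start()

    # Re-wrap each chunk in the OpenAI-style shim the app already yields.
    for new_text in streamer:
        yield type('Response', (), {
            'choices': [type('Choice', (), {
                'delta': {'content': new_text}
            })()]
        })()
    thread.join()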
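For reference, a hypothetical caller consuming those shim objects; note that choices[0].delta is a plain dict in both the committed code and the sketch above, so the content is read by key:

manager = ModelManager()
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"}
]
for chunk in manager.generate_response(messages, max_tokens=128):
    print(chunk.choices[0].delta['content'], end="", flush=True)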
71