suinY00N committed on
Commit
8d37a8d
•
1 Parent(s): 55464d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -38
app.py CHANGED
@@ -1,45 +1,71 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
 
4
  sentiment = pipeline("sentiment-analysis")
5
 
 
6
  def get_sentiment(์ž…๋ ฅ):
7
- # from transformers import AutoTokenizer, AutoModelForCausalLM
8
- # model_name = "heegyu/koalpaca-355m"
9
- # tokenizer = AutoTokenizer.from_pretrained(model_name)
10
- # tokenizer.truncation_side = "right"
11
- # model = AutoModelForCausalLM.from_pretrained(model_name)
12
- return sentiment(์ž…๋ ฅ)
13
-
14
- def get_response(output):
15
- context = f"<usr>{context}\n<sys>"
16
- inputs = tokenizer(
17
- context,
18
- truncation=True,
19
- max_length=512,
20
- return_tensors="pt")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- generation_args = dict(
23
- max_length=256,
24
- min_length=64,
25
- eos_token_id=2,
26
- do_sample=True,
27
- top_p=1.0,
28
- early_stopping=True
29
- )
30
-
31
- outputs = model.generate(**inputs, **generation_args)
32
- response = tokenizer.decode(outputs[0])
33
- print(context)
34
- print(response)
35
- response = response[len(context):].replace("</s>", "")
36
-
37
- return response
38
-
39
- model, tokenizer = get_pipe()
40
-
41
- def ask_question(input_):
42
- response = get_response(tokenizer, model, input_)
43
- return response
44
-
45
- gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="ํ•œ๊ตญ์–ด๋กœ ์งˆ๋ฌธํ•˜์„ธ์š”.").launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ # ๊ฐ์„ฑ ๋ถ„์„ ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™”
5
  sentiment = pipeline("sentiment-analysis")
6
 
7
+ # ์‚ฌ์šฉ์ž ์ž…๋ ฅ์— ๋Œ€ํ•œ ๊ฐ์„ฑ ๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ ๋ฐ˜ํ™˜ํ•˜๋Š” ํ•จ์ˆ˜
8
  def get_sentiment(์ž…๋ ฅ):
9
+ # ๊ฐ์„ฑ ๋ถ„์„ ์‹คํ–‰
10
+ result = sentiment(์ž…๋ ฅ)
11
+ # ๊ฒฐ๊ณผ ํฌ๋งทํŒ… ๋ฐ ๋ฐ˜ํ™˜
12
+ return result[0]
13
+
14
+ # Gradio ์•ฑ ์ธํ„ฐํŽ˜์ด์Šค ๊ตฌ์„ฑ
15
+ iface = gr.Interface(
16
+ fn=get_sentiment, # ์‹คํ–‰ํ•  ํ•จ์ˆ˜
17
+ inputs=gr.inputs.Textbox(lines=2, placeholder="์—ฌ๊ธฐ์— ํ…์ŠคํŠธ๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”..."), # ์ž…๋ ฅ๋ž€ ์„ค์ •
18
+ outputs="json", # ์ถœ๋ ฅ ํ˜•ํƒœ
19
+ clear_on_submit=True, # ์ œ์ถœ ํ›„ ์ž…๋ ฅ๋ž€ ํด๋ฆฌ์–ด
20
+ title="ํ…์ŠคํŠธ ๊ฐ์„ฑ ๋ถ„์„", # UI ์ œ๋ชฉ
21
+ description="ํ…์ŠคํŠธ๋ฅผ ์ž…๋ ฅํ•˜๊ณ  ์ œ์ถœ ๋ฒ„ํŠผ์„ ํด๋ฆญํ•˜์—ฌ ๊ฐ์„ฑ ๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ ํ™•์ธํ•˜์„ธ์š”." # UI ์„ค๋ช…
22
+ )
23
+
24
+ # Gradio ์•ฑ ์‹คํ–‰
25
+ iface.launch()
26
+
27
+ # import gradio as gr
28
+ # from transformers import pipeline
29
+
30
+ # sentiment = pipeline("sentiment-analysis")
31
+
32
+ # def get_sentiment(์ž…๋ ฅ):
33
+ # # from transformers import AutoTokenizer, AutoModelForCausalLM
34
+ # # model_name = "heegyu/koalpaca-355m"
35
+ # # tokenizer = AutoTokenizer.from_pretrained(model_name)
36
+ # # tokenizer.truncation_side = "right"
37
+ # # model = AutoModelForCausalLM.from_pretrained(model_name)
38
+ # return sentiment(์ž…๋ ฅ)
39
+
40
+ # def get_response(output):
41
+ # context = f"<usr>{context}\n<sys>"
42
+ # inputs = tokenizer(
43
+ # context,
44
+ # truncation=True,
45
+ # max_length=512,
46
+ # return_tensors="pt")
47
 
48
+ # generation_args = dict(
49
+ # max_length=256,
50
+ # min_length=64,
51
+ # eos_token_id=2,
52
+ # do_sample=True,
53
+ # top_p=1.0,
54
+ # early_stopping=True
55
+ # )
56
+
57
+ # outputs = model.generate(**inputs, **generation_args)
58
+ # response = tokenizer.decode(outputs[0])
59
+ # print(context)
60
+ # print(response)
61
+ # response = response[len(context):].replace("</s>", "")
62
+
63
+ # return response
64
+
65
+ # model, tokenizer = get_pipe()
66
+
67
+ # def ask_question(input_):
68
+ # response = get_response(tokenizer, model, input_)
69
+ # return response
70
+
71
+ # gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="ํ•œ๊ตญ์–ด๋กœ ์งˆ๋ฌธํ•˜์„ธ์š”.").launch()