suinY00N committed on
Commit
4ea9451
•
1 Parent(s): 0200696

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -41
app.py CHANGED
@@ -15,7 +15,7 @@ interface = gr.Interface(
15
  fn=get_sentiment, # ํ˜ธ์ถœ๋  ํ•จ์ˆ˜
16
  inputs=gr.inputs.Textbox(lines=2, placeholder="์—ฌ๊ธฐ์— ํ…์ŠคํŠธ๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”..."), # ์ž…๋ ฅ๋ž€ ์„ค์ •
17
  outputs="text", # ์ถœ๋ ฅ ํ˜•์‹
18
- title="ํ…์ŠคํŠธ ๊ฐ์„ฑ ๋ถ„์„", # UI ์ œ๋ชฉ
19
  description="์ด ์•ฑ์€ ์ž…๋ ฅ๋œ ํ…์ŠคํŠธ์˜ ๊ฐ์„ฑ์„ ๋ถ„์„ํ•ฉ๋‹ˆ๋‹ค. ๊ธ์ •์ ์ด๊ฑฐ๋‚˜ ๋ถ€์ •์ ์ธ ๊ฒฐ๊ณผ๋ฅผ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค.", # UI ์„ค๋ช…
20
  examples=[["์ด ์ œํ’ˆ์€ ์ •๋ง ์ข‹์Šต๋‹ˆ๋‹ค!"], ["์ œ ๊ธฐ๋Œ€์— ๋ชป ๋ฏธ์ณค์–ด์š”."]], # ์˜ˆ์‹œ ์ž…๋ ฅ
21
  theme="default", # UI ํ…Œ๋งˆ
@@ -25,45 +25,5 @@ interface = gr.Interface(
25
  # Gradio ์•ฑ ์‹คํ–‰
26
  interface.launch()
27
 
28
- # import gradio as gr
29
-
30
- # def get_pipe():
31
- # from transformers import AutoTokenizer, AutoModelForCausalLM
32
- # model_name = "heegyu/koalpaca-355m"
33
- # tokenizer = AutoTokenizer.from_pretrained(model_name)
34
- # tokenizer.truncation_side = "right"
35
- # model = AutoModelForCausalLM.from_pretrained(model_name)
36
- # return model, tokenizer
37
-
38
- # def get_response(tokenizer, model, context):
39
- # context = f"<usr>{context}\n<sys>"
40
- # inputs = tokenizer(
41
- # context,
42
- # truncation=True,
43
- # max_length=512,
44
- # return_tensors="pt")
45
-
46
- # generation_args = dict(
47
- # max_length=256,
48
- # min_length=64,
49
- # eos_token_id=2,
50
- # do_sample=True,
51
- # top_p=1.0,
52
- # early_stopping=True
53
- # )
54
-
55
- # outputs = model.generate(**inputs, **generation_args)
56
- # response = tokenizer.decode(outputs[0])
57
- # print(context)
58
- # print(response)
59
- # response = response[len(context):].replace("</s>", "")
60
-
61
- # return response
62
-
63
- # model, tokenizer = get_pipe()
64
-
65
- # def ask_question(input_):
66
- # response = get_response(tokenizer, model, input_)
67
- # return response
68
 
69
  # gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="ํ•œ๊ตญ์–ด๋กœ ์งˆ๋ฌธํ•˜์„ธ์š”.").launch()
 
15
  fn=get_sentiment, # ํ˜ธ์ถœ๋  ํ•จ์ˆ˜
16
  inputs=gr.inputs.Textbox(lines=2, placeholder="์—ฌ๊ธฐ์— ํ…์ŠคํŠธ๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”..."), # ์ž…๋ ฅ๋ž€ ์„ค์ •
17
  outputs="text", # ์ถœ๋ ฅ ํ˜•์‹
18
+ title="Sentiment Analysis", # UI ์ œ๋ชฉ
19
  description="์ด ์•ฑ์€ ์ž…๋ ฅ๋œ ํ…์ŠคํŠธ์˜ ๊ฐ์„ฑ์„ ๋ถ„์„ํ•ฉ๋‹ˆ๋‹ค. ๊ธ์ •์ ์ด๊ฑฐ๋‚˜ ๋ถ€์ •์ ์ธ ๊ฒฐ๊ณผ๋ฅผ ๋ณด์—ฌ์ค๋‹ˆ๋‹ค.", # UI ์„ค๋ช…
20
  examples=[["์ด ์ œํ’ˆ์€ ์ •๋ง ์ข‹์Šต๋‹ˆ๋‹ค!"], ["์ œ ๊ธฐ๋Œ€์— ๋ชป ๋ฏธ์ณค์–ด์š”."]], # ์˜ˆ์‹œ ์ž…๋ ฅ
21
  theme="default", # UI ํ…Œ๋งˆ
 
25
  # Gradio ์•ฑ ์‹คํ–‰
26
  interface.launch()
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  # gr.Interface(fn=ask_question, inputs="text", outputs="text", title="KoAlpaca-355M", description="ํ•œ๊ตญ์–ด๋กœ ์งˆ๋ฌธํ•˜์„ธ์š”.").launch()