Zeebachu committed on
Commit
cf4f813
·
verified ·
1 Parent(s): 2b17e3b

Upload app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ import torch
+
+ # Load the model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("noahkim/KoT5_news_summarization")
+ model = AutoModelForSeq2SeqLM.from_pretrained("noahkim/KoT5_news_summarization")
+
+ # GPU setup
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = model.to(device)
+
+ # Text summarization function
+ def summarize_text(input_text):
+     inputs = tokenizer(input_text, return_tensors="pt", padding="max_length", truncation=True, max_length=2048)
+     inputs = {key: value.to(device) for key, value in inputs.items()}
+
+     summary_text_ids = model.generate(
+         input_ids=inputs['input_ids'],
+         attention_mask=inputs['attention_mask'],
+         max_length=512,
+         min_length=128,
+         num_beams=6,
+         repetition_penalty=1.5,
+         no_repeat_ngram_size=15,
+     )
+
+     summary_text = tokenizer.decode(summary_text_ids[0], skip_special_tokens=True)
+     return summary_text
+
+ # Define the Gradio interface
+ iface = gr.Interface(
+     fn=summarize_text,
+     inputs=gr.Textbox(label="Input Text"),
+     outputs=gr.Textbox(label="Summary")
+ )
+
+ # Launch so the app runs directly in the Space
+ iface.launch()
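
A quick way to exercise the summarizer once the app is running (locally via `python app.py`, or on the deployed Space) is to call it through the Gradio client. This is a minimal sketch, not part of the commit: the local URL, the placeholder article text, and the Space name are assumptions.

    # Hypothetical smoke test against the running app (assumes gradio_client is installed).
    from gradio_client import Client

    # Point at the local server, or at the Space, e.g. Client("Zeebachu/<space-name>") (placeholder name).
    client = Client("http://127.0.0.1:7860")

    sample_article = "여기에 요약할 한국어 뉴스 기사 본문을 넣습니다."  # hypothetical placeholder input
    result = client.predict(sample_article, api_name="/predict")  # "/predict" is the default endpoint for a single gr.Interface
    print(result)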