# zeebachu / app.py — Hugging Face Space app (upload commit cf4f813, verified)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# λͺ¨λΈ λ‘œλ“œ
tokenizer = AutoTokenizer.from_pretrained("noahkim/KoT5_news_summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("noahkim/KoT5_news_summarization")
# GPU μ„€μ •
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# ν…μŠ€νŠΈ μš”μ•½ ν•¨μˆ˜
def summarize_text(input_text):
inputs = tokenizer(input_text, return_tensors="pt", padding="max_length", truncation=True, max_length=2048)
inputs = {key: value.to(device) for key, value in inputs.items()}
summary_text_ids = model.generate(
input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
max_length=512,
min_length=128,
num_beams=6,
repetition_penalty=1.5,
no_repeat_ngram_size=15,
)
summary_text = tokenizer.decode(summary_text_ids[0], skip_special_tokens=True)
return summary_text
# Gradio μΈν„°νŽ˜μ΄μŠ€ μ •μ˜
iface = gr.Interface(
fn=summarize_text,
inputs=gr.Textbox(label="Input Text"),
outputs=gr.Textbox(label="Summary")
)
# Spaceμ—μ„œ λ°”λ‘œ μ‹€ν–‰ν•  수 μžˆλ„λ‘ μ‹€ν–‰
iface.launch()