|
from functools import lru_cache

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
|
|
|
LANGS = ["kin_Latn","eng_Latn"] |
|
TASK = "translation" |
|
|
|
|
|
|
|
|
|
|
|
device = 0 if torch.cuda.is_available() else -1 |
|
|
|
general_model = AutoModelForSeq2SeqLM.from_pretrained("mbazaNLP/Nllb_finetuned_general_en_kin") |
|
|
|
|
|
|
|
|
|
|
|
|
|
tokenizer = AutoTokenizer.from_pretrained("mbazaNLP/Nllb_finetuned_general_en_kin") |
|
|
|
def translate(CKPT,text, src_lang, tgt_lang, max_length=400): |
|
|
|
""" |
|
Translate the text from source lang to target lang |
|
""" |
|
|
|
|
|
translation_pipeline = pipeline(TASK, |
|
model=general_model, |
|
tokenizer=tokenizer, |
|
src_lang=src_lang, |
|
tgt_lang=tgt_lang, |
|
max_length=max_length, |
|
device=device) |
|
|
|
result = translation_pipeline(text) |
|
return result[0]['translation_text'] |
|
|
|
|
|
gr.Interface( |
|
translate, |
|
[ |
|
|
|
gr.components.Textbox(label="Text"), |
|
gr.components.Dropdown(label="Source Language", choices=LANGS), |
|
gr.components.Dropdown(label="Target Language", choices=LANGS), |
|
|
|
], |
|
["text"], |
|
|
|
|
|
cache_examples=False, |
|
title="Finetuned-NLLB-EN-KIN", |
|
|
|
).launch() |
|
|
|
|
|
|