# Source: Hugging Face Space by user "cointegrated"
# (commit 54fb600: "Use a newer model; add randomization; allow same lang").
import gradio as gr
import torch
from transformers import NllbTokenizer, AutoModelForSeq2SeqLM
# Hugging Face model id of the NLLB model fine-tuned for Russian<->Tyvan.
MODEL_URL = 'slone/nllb-rus-tyv-v2-extvoc'
# Human-readable dropdown labels -> FLORES-200-style language codes.
lang_to_code = {
    'Орус | Русский | Russian': 'rus_Cyrl',
    'Тыва | Тувинский | Tyvan': 'tyv_Cyrl',
}
def fix_tokenizer(tokenizer, new_lang='tyv_Cyrl'):
    """Register a new language token in an NllbTokenizer's vocabulary, in place.

    Patches the tokenizer's internal language-code tables so that `new_lang`
    can be used as a source/target language code. This must be re-applied each
    time the tokenizer is (re)initialized, because the patch is not persisted.

    Args:
        tokenizer: NllbTokenizer instance to patch in place.
        new_lang: language code to register (default: Tyvan, 'tyv_Cyrl').
    """
    # If new_lang already got added as a regular token, exclude it from the
    # length so the id assigned below stays stable across repeated calls.
    old_len = len(tokenizer) - int(new_lang in tokenizer.added_tokens_encoder)
    tokenizer.lang_code_to_id[new_lang] = old_len-1
    tokenizer.id_to_lang_code[old_len-1] = new_lang
    # always move "mask" to the last position
    tokenizer.fairseq_tokens_to_ids["<mask>"] = len(tokenizer.sp_model) + len(tokenizer.lang_code_to_id) + tokenizer.fairseq_offset
    tokenizer.fairseq_tokens_to_ids.update(tokenizer.lang_code_to_id)
    # rebuild the reverse mapping so it stays consistent with the update above
    tokenizer.fairseq_ids_to_tokens = {v: k for k, v in tokenizer.fairseq_tokens_to_ids.items()}
    if new_lang not in tokenizer._additional_special_tokens:
        tokenizer._additional_special_tokens.append(new_lang)
    # clear the added token encoder; otherwise a new token may end up there by mistake
    tokenizer.added_tokens_encoder = {}
    tokenizer.added_tokens_decoder = {}
def translate(
    text,
    model,
    tokenizer,
    src_lang='rus_Cyrl',
    tgt_lang='tyv_Cyrl',
    max_length='auto',
    num_beams=4,
    no_repeat_ngram_size=4,
    n_out=None,
    max_input_length=512,
    **kwargs
):
    """Translate `text` between languages with an NLLB-style seq2seq model.

    Args:
        text: a string or a list of strings to translate.
        model: a seq2seq transformers model (e.g. loaded from MODEL_URL).
        tokenizer: the matching NllbTokenizer (patched by `fix_tokenizer`).
        src_lang: FLORES-style code of the source language.
        tgt_lang: FLORES-style code of the target language.
        max_length: generation length cap; 'auto' scales it with input length.
        num_beams: beam-search width.
        no_repeat_ngram_size: block repeated n-grams of this size.
        n_out: number of translations to return per input (None -> 1).
        max_input_length: truncate tokenized inputs to this many tokens.
        **kwargs: forwarded to `model.generate` (e.g. do_sample).

    Returns:
        A single string when `text` is a string and `n_out` is None,
        otherwise a list of decoded translations.
    """
    tokenizer.src_lang = src_lang
    encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=max_input_length)
    if max_length == 'auto':
        # allow roughly twice the input length plus a fixed margin
        max_length = int(32 + 2.0 * encoded.input_ids.shape[1])
    model.eval()
    generated_tokens = model.generate(
        **encoded.to(model.device),
        # force the first generated token to be the target-language code
        forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang],
        max_length=max_length,
        num_beams=num_beams,
        no_repeat_ngram_size=no_repeat_ngram_size,
        num_return_sequences=n_out or 1,
        **kwargs
    )
    out = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # single string in, default n_out -> unwrap to a plain string
    if isinstance(text, str) and n_out is None:
        return out[0]
    return out
def translate_wrapper(text, src, trg, random=False):
    """Gradio callback: translate `text` from language `src` to language `trg`.

    Args:
        text: input text (the model works best on one sentence at a time).
        src: dropdown label of the source language (key of `lang_to_code`).
        trg: dropdown label of the target language; translating into the
            same language as the source is deliberately allowed.
        random: if True, use sampling instead of beam search for more
            varied output.

    Returns:
        The translated text as a string.
    """
    src_lang = lang_to_code.get(src)
    tgt_lang = lang_to_code.get(trg)
    return translate(
        text=text,
        model=model,
        tokenizer=tokenizer,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
        do_sample=random,
        # sampling with wide beams is slow and rarely better; go greedy instead
        num_beams=1 if random else 4,
    )
# Markdown description rendered below the Gradio translation widget.
article = """
This is a NLLB-200-600M model fine-tuned for translation between Russian and Tyvan (Tuvan) languages,
using the data from https://tyvan.ru/.
**More details will be published soon!**
__Please translate one sentence at a time; the model is not working adequately with multiple sentences!__
"""
# Gradio UI: free-text input, source/target language dropdowns (defaulting to
# Russian -> Tyvan), and a "random" checkbox that switches on sampling.
interface = gr.Interface(
    translate_wrapper,
    [
        gr.Textbox(label="Text", lines=2, placeholder='text to translate '),
        gr.Dropdown(list(lang_to_code.keys()), type="value", label='source language', value=list(lang_to_code.keys())[0]),
        gr.Dropdown(list(lang_to_code.keys()), type="value", label='target language', value=list(lang_to_code.keys())[1]),
        gr.Checkbox(label="random", value=False),
    ],
    "text",
    title='Tyvan-Russian translation',  # fixed user-facing typo: "translaton"
    article=article,
)
if __name__ == '__main__':
    # Load the model once at startup; `translate_wrapper` reads this global.
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL)
    if torch.cuda.is_available():
        model.cuda()
    # NOTE(review): force_download re-fetches tokenizer files on every launch —
    # presumably to pick up the extended vocabulary; confirm it is still needed.
    tokenizer = NllbTokenizer.from_pretrained(MODEL_URL, force_download=True)
    # Patch in the Tyvan language code (must run after every tokenizer init).
    fix_tokenizer(tokenizer)
    interface.launch()