"""Gradio app that translates English text to Italian with Facebook's M2M100 model."""

import gradio as gr
import torch
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Load the 418M-parameter multilingual translation model once at startup.
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")


def launch(text):
    """Translate an English sentence to Italian.

    Parameters
    ----------
    text : str
        English text entered in the Gradio textbox.

    Returns
    -------
    str
        The Italian translation.
    """
    tokenizer.src_lang = "en"  # tell the tokenizer the source language is English
    encoded = tokenizer(text, return_tensors="pt")
    # Inference only — no_grad skips gradient bookkeeping. The forced BOS token
    # makes the multilingual decoder emit Italian.
    with torch.no_grad():
        generated_tokens = model.generate(
            **encoded, forced_bos_token_id=tokenizer.get_lang_id("it")
        )
    # batch_decode returns a *list* of strings; the original returned the whole
    # list, which Gradio renders as "['...']" — return the single translation.
    response = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    return response[0]


iface = gr.Interface(launch, inputs="text", outputs="text")

if __name__ == "__main__":
    iface.launch()