#!pip install gradio transformers torch

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model once at startup instead of on every request.
model_checkpoint = "bigscience/mt0-base"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)


def translation(text):
    # Inference: prompt the model to translate the input text into Darija.
    inputs = tokenizer("translate to darija : " + text, return_tensors="pt")
    output = model.generate(**inputs)
    return tokenizer.decode(output[0], skip_special_tokens=True)


if __name__ == '__main__':
    print('\tinit models')
    # A model-selection radio could be added alongside the textbox, e.g.:
    # gr.Radio(['nllb-distilled-600M', 'nllb-1.3B', 'nllb-distilled-1.3B'], label='NLLB Model')
    inputs = gr.Textbox(lines=5, label="Input text")
    outputs = gr.Textbox(label="Output text")
    title = "Derej M3aaya"
    demo_status = "Demo is running on CPU"
    description = f"Details: https://github.com/facebookresearch/fairseq/tree/nllb. {demo_status}"
    examples = [['Hi nice to meet you']]

    gr.Interface(
        translation,
        inputs,
        outputs,
        title=title,
        description=description,
        examples=examples,
    ).launch()
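
# The original imports pulled in transformers' `pipeline`, which suggests an
# alternative inference path. A minimal sketch of the same Darija translation
# via the text2text-generation pipeline is below (kept commented so the Gradio
# app above remains the single entry point; the prompt string and
# max_new_tokens value are illustrative assumptions, not part of the demo):
#
# from transformers import pipeline
#
# translator = pipeline("text2text-generation", model="bigscience/mt0-base")
# result = translator("translate to darija : Hi nice to meet you", max_new_tokens=64)
# print(result[0]["generated_text"])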