"""Gradio app: translate a Dzongkha prompt to English, then generate an image.

Flow: Dzongkha text -> NLLB translation model -> English prompt ->
text-to-image model loaded from the Hugging Face hub.
"""

import random

import gradio as gr
import requests  # noqa: F401  (kept: may be used elsewhere / by deployment)
from PIL import Image  # noqa: F401
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Dzongkha -> English translation model. The image model expects English
# prompts, so user input is translated first.
translation_model = AutoModelForSeq2SeqLM.from_pretrained(
    "KarmaCST/nllb-200-distilled-600M-dz-to-en"
)
tokenizer = AutoTokenizer.from_pretrained(
    "KarmaCST/nllb-200-distilled-600M-dz-to-en"
)
src_lang = "dzo_Tibt"
tgt_lang = "eng_Latn"

# Build the translation pipeline ONCE at startup. The original rebuilt it on
# every request, paying the construction cost per image generation.
translation_pipeline = pipeline(
    "translation",
    model=translation_model,
    tokenizer=tokenizer,
    src_lang=src_lang,
    tgt_lang=tgt_lang,
)

# Text-to-image model served through the Hugging Face inference wrapper.
model = gr.load("models/Purz/face-projection")

examples = [
    ["བྱི་ཅུང་ཚུ་གངས་རི་གི་ཐོག་ཁར་འཕུར།", None],
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་གནམ་གྲུ་འཕུར།", None],
    ["པཱ་རོ་ཁྲོམ་གྱི་ཐོག་ཁར་ ཤིང་ཚུ་གི་བར་ན་ གནམ་གྲུ་འཕུར་བའི་འཐོང་གནང་།", None],
    # BUG FIX: this row was missing the seed column; with a two-input
    # Interface, clicking a one-element example fails.
    ["སློབ་ཕྲུག་ཚུ་ ཆརཔ་ནང་རྐང་རྩེད་རྩེ་དེས།", None],
]


def generate_image(text, seed):
    """Translate a Dzongkha prompt and generate an image from it.

    Parameters
    ----------
    text : str
        Prompt in Dzongkha.
    seed : int | None
        Optional seed for Python's RNG. NOTE(review): seeding the local
        `random` module is best-effort — the remote image model may not
        honor it; confirm whether the hub model accepts a seed parameter.

    Returns
    -------
    The image produced by the loaded text-to-image model.
    """
    translated = translation_pipeline(text)[0]["translation_text"]
    if seed is not None:
        random.seed(seed)
    # BUG FIX: the original compared the *translated* English text against
    # the Dzongkha example prompts, so this branch could never fire.
    # Compare the user's original input instead.
    if text in [example[0] for example in examples]:
        print(f"Using example: {text}")
    return model(translated)


interface = gr.Interface(
    # BUG FIX: the original passed fn=translate_dzongkha_to_english, a
    # function that only exists in commented-out code — launching raised
    # NameError. The handler defined in this file is generate_image.
    fn=generate_image,
    inputs=[
        gr.Textbox(
            label="Type here your imagination:",
            placeholder="Type or click an example...",
        ),
        gr.Slider(minimum=0, maximum=10000, step=1, label="Seed (optional)"),
    ],
    outputs=gr.Image(label="Generated Image"),
    examples=examples,
    theme="NoCrypt/miku",
    # Adjacent string literals concatenate to the exact original message.
    description=(
        "Sorry for the inconvenience. The model is currently running on the "
        "CPU, which might affect performance. We appreciate your "
        "understanding."
    ),
)

interface.launch()