# Hugging Face Space by ysharma (HF staff) — commit 3252649 ("updte"/update).
import gradio as gr
import os
import requests
import time
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import paddlehub as hub
# Importing the essential libraries for monitoring
import psutil
# Access token for calling the stable-diffusion Space via Blocks.load below;
# raises KeyError at startup if the HF_TOKEN secret is not configured.
HF_TOKEN = os.environ["HF_TOKEN"]
# ERNIE-ViLG text-to-image module from PaddleHub (downloaded on first run).
model = hub.Module(name='ernie_vilg')
def get_ernie_vilg(text_prompts, style):
    """Generate an image with ERNIE-ViLG for a Chinese prompt and style.

    The dropdown value looks like '水彩-WaterColor'; only the Chinese part
    before the dash is what the PaddleHub module expects, so everything from
    the first '-' onward is discarded.  Returns the first generated image.
    """
    chinese_style = style.split('-')[0]
    images = model.generate_image(
        text_prompts=text_prompts,
        style=chinese_style,
        visualization=False,
    )
    # Surface CPU / RAM pressure in the Space logs (psutil monitoring).
    print(f"ERNIE CPU percent is: {psutil.cpu_percent()}")
    print(f"ERNIE virtual memory is : {psutil.virtual_memory().percent}")
    return images[0]
# Remote handle to the stable-diffusion Space, callable for inference.
# NOTE(review): gr.Blocks.load was deprecated in later Gradio releases in
# favour of gr.load — confirm against the gradio version pinned for this Space.
sd_inf = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion", use_auth_token=HF_TOKEN)
# NLLB-200 (distilled, 600M) model + tokenizer used for EN -> zh-Hans translation.
nllb_model_name = 'facebook/nllb-200-distilled-600M'
nllb_model = AutoModelForSeq2SeqLM.from_pretrained(nllb_model_name)
nllb_tokenizer = AutoTokenizer.from_pretrained(nllb_model_name)
def get_chinese_translation(text):
    """Translate English *text* into Simplified Chinese with NLLB-200.

    Reuses the module-level nllb_model / nllb_tokenizer but constructs a
    fresh translation pipeline on every call.  Returns the translated string.
    """
    print("********Inside get_chinese_translation ********")
    source_lang, target_lang = 'eng_Latn', 'zho_Hans'
    print(f"text is :{text}, source language is : {source_lang}, target language is : {target_lang} ")
    translate = pipeline(
        'translation',
        model=nllb_model,
        tokenizer=nllb_tokenizer,
        src_lang=source_lang,
        tgt_lang=target_lang,
    )
    raw = translate(text, max_length=400)
    print(f"initial output is:{raw}")
    translated = raw[0]['translation_text']
    print(f"output is:{translated}")
    # Surface CPU / RAM pressure in the Space logs (psutil monitoring).
    print(f"CPU percent is: {psutil.cpu_percent()}")
    print(f"virtual memory is : {psutil.virtual_memory().percent}")
    return translated
# NOTE(review): Blocks inference against the stable-diffusion Space was
# reported as not working (the UI button wiring for it is disabled below).
def get_sd(translated_txt, samples, steps, scale, seed):
    """Proxy a text-to-image request to the remote stable-diffusion Space.

    fn_index=1 selects the Space's txt2img endpoint — presumably stable
    across upstream updates; verify against the target Space.  Returns the
    first item of the generated gallery.
    """
    print("******** Inside get_SD ********")
    print(f"translated_txt is : {translated_txt}")
    gallery = sd_inf(translated_txt, samples, steps, scale, seed, fn_index=1)
    return gallery[0]
# UI: translate an English prompt to Simplified Chinese, then feed the
# translation to ERNIE-ViLG to generate an image.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>ERNIE in English !</center></h1>")
    gr.Markdown("<h3><center>ERNIE-ViLG is a state-of-the-art text-to-image model that generates images from simplified Chinese text.</center></h3>")
    gr.Markdown("<h3><center>This app helps you in checking-out ERNIE in English. Note that due to limitations on available Ram, only one image is being generated at the moment<br><br>Please access the original model here - [ERNIE-ViLG](https://huggingface.co/spaces/PaddlePaddle/ERNIE-ViLG)</center></h3>")

    # Row 1: English prompt -> Simplified Chinese translation.
    with gr.Row():
        with gr.Column():
            in_text_prompt = gr.Textbox(label="Enter English text here")
            out_text_chinese = gr.Textbox(label="Text in Simplified Chinese")
            b1 = gr.Button("English to Simplified Chinese")

    # Row 2: style pick + ERNIE-ViLG generation from the translated text.
    with gr.Row():
        with gr.Column():
            in_styles = gr.Dropdown(['水彩-WaterColor', '油画-OilPainting', '粉笔画-Painting', '卡通-Cartoon', '蜡笔画-Pencils', '儿童画-ChildrensPaintings', '探索无限-ExploringTheInfinite'])
            b2 = gr.Button("Generate Images from Ernie")
            out_ernie = gr.Image(type="pil", label="Ernie output for the given prompt")

    # Wire the two buttons; the stable-diffusion path (get_sd) stays disabled.
    b1.click(get_chinese_translation, in_text_prompt, out_text_chinese)
    b2.click(get_ernie_vilg, [out_text_chinese, in_styles], out_ernie)

demo.launch(enable_queue=True, debug=True)