# Spaces:
# Runtime error
# Runtime error
#!/usr/bin/env python | |
from __future__ import annotations | |
import os | |
import shlex | |
import subprocess | |
import gradio as gr | |
import numpy as np | |
import torch | |
from modelscope.pipelines import pipeline | |
from modelscope.utils.constant import Tasks | |
# On Hugging Face Spaces (SYSTEM=spaces), install a patched modelscope from a PR branch.
# NOTE(review): this runs *after* `modelscope` was already imported above, so the
# patched package cannot take effect in the current process — it only applies on the
# next restart. Presumably this block should sit above the imports; confirm intent.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.run(
        shlex.split(
            'pip install git+https://github.com/modelscope/modelscope.git@refs/pull/173/head'
        ))

# Markdown heading rendered at the top of the Gradio UI.
DESCRIPTION = '# [ModelScope Chinese text2image (tiny)](https://www.modelscope.cn/models/damo/cv_diffusion_text-to-image-synthesis_tiny/summary)'

# SPACE_ID is set by the Spaces runtime; when present, append a "Duplicate Space"
# badge so users can clone the app onto their own (possibly GPU) hardware.
SPACE_ID = os.getenv('SPACE_ID')
if SPACE_ID is not None:
    DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'

# Module-level ModelScope text-to-image pipeline, shared by all requests.
pipe = pipeline(Tasks.text_to_image_synthesis,
                'damo/cv_diffusion_text-to-image-synthesis_tiny')
def run(
    text: str,
    seed: int,
    num_steps_generator: int,
    num_steps_upscaler1: int,
    num_steps_upscaler2: int,
    guidance_scale: float,
) -> np.ndarray:
    """Synthesize one image from a text prompt via the module-level pipeline.

    Seeds torch's global RNG for reproducibility, then runs the DDIM solver
    with the given step counts for the base generator and the two upscaling
    stages. Returns the first output image as a numpy array.
    """
    torch.manual_seed(seed)
    request = {
        'text': text,
        'solver': 'ddim',
        'generator_ddim_timesteps': num_steps_generator,
        'upsampler_256_ddim_timesteps': num_steps_upscaler1,
        'upsampler_1024_ddim_timesteps': num_steps_upscaler2,
        'generator_guide_scale': guidance_scale,
    }
    outputs = pipe(request)
    return outputs['output_imgs'][0]
# Cached example row; column order is
# [prompt, seed, generator steps, upscaler-1 steps, upscaler-2 steps, guidance scale].
examples = [
    ['中国山水画', 0, 250, 50, 20, 5.0],
]
# Gradio UI: prompt + generation controls in the left column, output image on
# the right. Both the Run button and pressing Enter in the prompt box call run().
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            text = gr.Text(label='Prompt')
            # Seed is randomized per page load; a fixed value reproduces results
            # (run() feeds it to torch.manual_seed).
            seed = gr.Slider(label='Seed',
                             minimum=0,
                             maximum=100000,
                             value=0,
                             step=1,
                             randomize=True)
            run_button = gr.Button('Run')
            # DDIM step counts for the base 64px generator and the two
            # upscaling stages, plus classifier-free guidance scale.
            with gr.Accordion('Advanced options', open=False):
                num_steps_generator = gr.Slider(label='Steps (Generator)',
                                                minimum=1,
                                                maximum=1000,
                                                value=250,
                                                step=1)
                num_steps_upscaler1 = gr.Slider(
                    label='Steps (Upscaler 64=>256)',
                    minimum=1,
                    maximum=50,
                    value=50,
                    step=1)
                num_steps_upscaler2 = gr.Slider(
                    label='Steps (Upscaler 256=>1024)',
                    minimum=1,
                    maximum=20,
                    value=20,
                    step=1)
                guidance_scale = gr.Slider(label='Guidance scale',
                                           minimum=0,
                                           maximum=100,
                                           value=5.0,
                                           step=0.1)
        with gr.Column():
            result = gr.Image(label='Output')
    # Argument order must match run()'s signature and the `examples` rows.
    inputs = [
        text,
        seed,
        num_steps_generator,
        num_steps_upscaler1,
        num_steps_upscaler2,
        guidance_scale,
    ]
    with gr.Row():
        # cache_examples=True precomputes the example output at startup,
        # so clicking the example returns instantly.
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=run,
                    cache_examples=True)

    text.submit(fn=run, inputs=inputs, outputs=result)
    run_button.click(fn=run, inputs=inputs, outputs=result)

# Queue requests (GPU inference is serialized); api_open=False hides the
# programmatic API endpoint from the public queue.
demo.queue(api_open=False).launch()