Add files
- .pre-commit-config.yaml +37 -0
- .style.yapf +5 -0
- README.md +2 -2
- app.py +111 -0
- requirements.txt +3 -0
- style.css +3 -0
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,37 @@
+exclude: patch
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.2.0
+  hooks:
+  - id: check-executables-have-shebangs
+  - id: check-json
+  - id: check-merge-conflict
+  - id: check-shebang-scripts-are-executable
+  - id: check-toml
+  - id: check-yaml
+  - id: double-quote-string-fixer
+  - id: end-of-file-fixer
+  - id: mixed-line-ending
+    args: ['--fix=lf']
+  - id: requirements-txt-fixer
+  - id: trailing-whitespace
+- repo: https://github.com/myint/docformatter
+  rev: v1.4
+  hooks:
+  - id: docformatter
+    args: ['--in-place']
+- repo: https://github.com/pycqa/isort
+  rev: 5.12.0
+  hooks:
+  - id: isort
+- repo: https://github.com/pre-commit/mirrors-mypy
+  rev: v0.991
+  hooks:
+  - id: mypy
+    args: ['--ignore-missing-imports']
+    additional_dependencies: ['types-python-slugify']
+- repo: https://github.com/google/yapf
+  rev: v0.32.0
+  hooks:
+  - id: yapf
+    args: ['--parallel', '--in-place']
.style.yapf
ADDED
@@ -0,0 +1,5 @@
+[style]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = false
+spaces_before_comment = 2
+split_before_logical_operator = true
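As a rough, hypothetical illustration (not taken from the repository), code formatted under these yapf settings keeps a nested def immediately after its enclosing def, puts two spaces before trailing comments, and, when a long boolean expression must wrap, breaks the line before the logical operator:

def outer():
    def inner():  # trailing comment preceded by two spaces (spaces_before_comment = 2)
        return 1
    return inner()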
README.md
CHANGED
@@ -1,10 +1,10 @@
---
-title:
+title: ModelScope Chinese text2image (tiny)
emoji: 👀
colorFrom: yellow
colorTo: indigo
sdk: gradio
-sdk_version: 3.20.
+sdk_version: 3.20.1
app_file: app.py
pinned: false
license: mit
app.py
ADDED
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+from __future__ import annotations
+
+import os
+import shlex
+import subprocess
+
+import gradio as gr
+import numpy as np
+import torch
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+if os.getenv('SYSTEM') == 'spaces':
+    subprocess.run(
+        shlex.split(
+            'pip install git+https://github.com/modelscope/modelscope.git@refs/pull/173/head'
+        ))
+
+DESCRIPTION = '# [ModelScope Chinese text2image (tiny)](https://www.modelscope.cn/models/damo/cv_diffusion_text-to-image-synthesis_tiny/summary)'
+
+SPACE_ID = os.getenv('SPACE_ID')
+if SPACE_ID is not None:
+    DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+pipe = pipeline(Tasks.text_to_image_synthesis,
+                'damo/cv_diffusion_text-to-image-synthesis_tiny')
+
+
+def run(
+    text: str,
+    seed: int,
+    num_steps_generator: int,
+    num_steps_upscaler1: int,
+    num_steps_upscaler2: int,
+    guidance_scale: float,
+) -> np.ndarray:
+    torch.manual_seed(seed)
+    results = pipe({
+        'text': text,
+        'solver': 'ddim',
+        'generator_ddim_timesteps': num_steps_generator,
+        'upsampler_256_ddim_timesteps': num_steps_upscaler1,
+        'upsampler_1024_ddim_timesteps': num_steps_upscaler2,
+        'generator_guide_scale': guidance_scale,
+    })
+    return results['output_imgs'][0]
+
+
+examples = [
+    ['中国山水画', 0, 250, 50, 20, 5.0],
+]
+
+with gr.Blocks(css='style.css') as demo:
+    gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        with gr.Column():
+            text = gr.Text(label='Prompt')
+            seed = gr.Slider(label='Seed',
+                             minimum=0,
+                             maximum=100000,
+                             value=0,
+                             step=1,
+                             randomize=True)
+            run_button = gr.Button('Run')
+            with gr.Accordion('Advanced options', open=False):
+                num_steps_generator = gr.Slider(label='Steps (Generator)',
+                                                minimum=1,
+                                                maximum=1000,
+                                                value=250,
+                                                step=1)
+                num_steps_upscaler1 = gr.Slider(
+                    label='Steps (Upscaler 64=>256)',
+                    minimum=1,
+                    maximum=50,
+                    value=50,
+                    step=1)
+                num_steps_upscaler2 = gr.Slider(
+                    label='Steps (Upscaler 256=>1024)',
+                    minimum=1,
+                    maximum=20,
+                    value=20,
+                    step=1)
+                guidance_scale = gr.Slider(label='Guidance scale',
+                                           minimum=0,
+                                           maximum=100,
+                                           value=5.0,
+                                           step=0.1)
+        with gr.Column():
+            result = gr.Image(label='Output')
+
+    inputs = [
+        text,
+        seed,
+        num_steps_generator,
+        num_steps_upscaler1,
+        num_steps_upscaler2,
+        guidance_scale,
+    ]
+    with gr.Row():
+        gr.Examples(examples=examples,
+                    inputs=inputs,
+                    outputs=result,
+                    fn=run,
+                    cache_examples=True)
+
+    text.submit(fn=run, inputs=inputs, outputs=result)
+    run_button.click(fn=run, inputs=inputs, outputs=result)
+
+demo.queue(api_open=False).launch()
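The inference path in app.py is the pipeline() call plus the parameter dict built in run(); everything else is Gradio wiring. A minimal sketch of driving the same model outside the UI, using only names and values that appear in the diff above (the final print is illustrative, and results['output_imgs'][0] is a numpy array per the run() return annotation):

# Minimal sketch: call the ModelScope pipeline directly, no Gradio UI.
# Assumes the packages from requirements.txt plus torch are installed.
import torch
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

pipe = pipeline(Tasks.text_to_image_synthesis,
                'damo/cv_diffusion_text-to-image-synthesis_tiny')

torch.manual_seed(0)  # same seeding strategy as run() in app.py
results = pipe({
    'text': '中国山水画',
    'solver': 'ddim',
    'generator_ddim_timesteps': 250,
    'upsampler_256_ddim_timesteps': 50,
    'upsampler_1024_ddim_timesteps': 20,
    'generator_guide_scale': 5.0,
})
image = results['output_imgs'][0]  # numpy array, as returned to Gradio
print(image.shape)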
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+decord==0.6.0
+fairseq==0.12.2
+modelscope[multi-modal]==1.3.2
style.css
ADDED
@@ -0,0 +1,3 @@
+h1 {
+  text-align: center;
+}