Update
- README.md +1 -1
- app.py +4 -3
- app_canny.py +4 -2
- app_depth.py +4 -2
- app_ip2p.py +4 -2
- app_lineart.py +4 -2
- app_mlsd.py +4 -2
- app_normal.py +4 -2
- app_openpose.py +4 -2
- app_scribble.py +4 -2
- app_scribble_interactive.py +4 -2
- app_segmentation.py +4 -2
- app_shuffle.py +4 -2
- app_softedge.py +4 -2
- requirements.txt +5 -5
- style.css +4 -0
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 📉
 colorFrom: yellow
 colorTo: green
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.36.0
 python_version: 3.10.11
 app_file: app.py
 pinned: false

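Note: the sdk_version bump keeps the Space metadata in step with the gradio pin in requirements.txt, which this commit also moves to 3.36.0.
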
app.py
CHANGED
@@ -27,9 +27,6 @@ DESCRIPTION = '# ControlNet v1.1'
 SPACE_ID = os.getenv('SPACE_ID')
 ALLOW_CHANGING_BASE_MODEL = SPACE_ID != 'hysts/ControlNet-v1-1'
 
-if SPACE_ID is not None:
-    DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
-
 if not torch.cuda.is_available():
     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
 
@@ -43,6 +40,10 @@ model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='Canny')
 
 with gr.Blocks(css='style.css') as demo:
     gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(value='Duplicate Space for private use',
+                       elem_id='duplicate-button',
+                       visible=os.getenv('SYSTEM') == 'spaces')
+
     with gr.Tabs():
         with gr.TabItem('Canny'):
             create_demo_canny(model.process_canny,

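In app.py, the hand-rolled "Duplicate Space" HTML banner appended to DESCRIPTION is dropped in favor of Gradio's built-in gr.DuplicateButton component, available in the gradio 3.36.0 pinned below. A minimal standalone sketch of the same pattern; the final queue()/launch() call is a placeholder for illustration, not copied from app.py:

import os

import gradio as gr

with gr.Blocks(css='style.css') as demo:
    gr.Markdown('# ControlNet v1.1')
    # Only show the button when the app is running on Hugging Face Spaces,
    # where the SYSTEM environment variable is set to 'spaces'.
    gr.DuplicateButton(value='Duplicate Space for private use',
                       elem_id='duplicate-button',
                       visible=os.getenv('SYSTEM') == 'spaces')

demo.queue().launch()
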
app_canny.py
CHANGED
@@ -62,8 +62,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

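The same Gallery change is repeated in every app_*.py module below: the output gr.Gallery is now configured entirely through constructor keyword arguments (show_label, columns, object_fit), which gradio 3.36.0 accepts directly. A small self-contained sketch of the updated output column; the fake_process handler and the surrounding layout are illustrative stand-ins, not code from this repo:

import gradio as gr

def fake_process(image, prompt):
    # Stand-in for the real ControlNet pipeline: just echo the input image.
    return [image]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image = gr.Image()
            prompt = gr.Textbox(label='Prompt')
            run_button = gr.Button('Run')
        with gr.Column():
            # New-style Gallery configuration used throughout this commit.
            result = gr.Gallery(label='Output',
                                show_label=False,
                                columns=2,
                                object_fit='scale-down')
    run_button.click(fn=fake_process,
                     inputs=[image, prompt],
                     outputs=result)

demo.launch()
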
app_depth.py
CHANGED
@@ -61,8 +61,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_ip2p.py
CHANGED
@@ -50,8 +50,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_lineart.py
CHANGED
@@ -70,8 +70,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_mlsd.py
CHANGED
@@ -68,8 +68,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_normal.py
CHANGED
@@ -60,8 +60,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_openpose.py
CHANGED
@@ -60,8 +60,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_scribble.py
CHANGED
@@ -61,8 +61,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_scribble_interactive.py
CHANGED
@@ -66,8 +66,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
 
         create_button.click(fn=create_canvas,
                             inputs=[canvas_width, canvas_height],

app_segmentation.py
CHANGED
@@ -60,8 +60,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_shuffle.py
CHANGED
@@ -55,8 +55,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

app_softedge.py
CHANGED
@@ -66,8 +66,10 @@ def create_demo(process, max_images=12, default_num_images=3):
                     'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                 )
             with gr.Column():
-                result = gr.Gallery(label='Output',
-
+                result = gr.Gallery(label='Output',
+                                    show_label=False,
+                                    columns=2,
+                                    object_fit='scale-down')
         inputs = [
             image,
             prompt,

requirements.txt
CHANGED
@@ -1,10 +1,10 @@
 accelerate==0.20.3
-controlnet_aux==0.0.
-diffusers==0.
+controlnet_aux==0.0.6
+diffusers==0.18.0
 einops==0.6.1
-gradio==3.
-huggingface-hub==0.
-opencv-python-headless==4.
+gradio==3.36.0
+huggingface-hub==0.16.3
+opencv-python-headless==4.8.0.74
 safetensors==0.3.1
 torch==2.0.1
 torchvision==0.15.2

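All five bumped packages stay pinned to exact versions, with the gradio pin matching the sdk_version in README.md. A throwaway check (not part of the repo) to confirm an environment matches the pins; it assumes the distribution names resolve as written in requirements.txt:

from importlib import metadata

# Expect 0.0.6, 0.18.0 and 3.36.0 respectively if the pinned versions are installed.
for pkg in ('controlnet_aux', 'diffusers', 'gradio'):
    print(pkg, metadata.version(pkg))
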
style.css
CHANGED
@@ -1,3 +1,7 @@
 h1 {
   text-align: center;
 }
+
+#duplicate-button {
+  margin: auto;
+}
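
The new #duplicate-button rule targets the elem_id given to gr.DuplicateButton in app.py, so the button sits centered rather than stretched to the default layout position.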