Duplicate from akhaliq/woolitize-768sd1-5
Co-authored-by: Ahsen Khaliq <akhaliq@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +137 -0
- requirements.txt +7 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Woolitize 768sd1 5
+emoji: 🔥
+colorFrom: blue
+colorTo: purple
+sdk: gradio
+sdk_version: 3.12.0
+app_file: app.py
+pinned: false
+duplicated_from: akhaliq/woolitize-768sd1-5
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,137 @@
+# Gradio Space for the plasmo/woolitize-768sd1-5 Stable Diffusion checkpoint.
+from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
+import gradio as gr
+import torch
+from PIL import Image
+
+model_id = 'plasmo/woolitize-768sd1-5'
+prefix = ''
+
+# Load the checkpoint once and share the multistep DPM-Solver scheduler between the
+# text-to-image and image-to-image pipelines; fp16 is used only when a GPU is available.
+scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
+
+pipe = StableDiffusionPipeline.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+    scheduler=scheduler)
+
+pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+    scheduler=scheduler)
+
+if torch.cuda.is_available():
+    pipe = pipe.to("cuda")
+    pipe_i2i = pipe_i2i.to("cuda")
+
+def error_str(error, title="Error"):
+    return f"""#### {title}
+            {error}""" if error else ""
+
+def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
+    # Route to img2img when an init image is supplied, otherwise txt2img;
+    # any exception is returned as markdown so the UI can display it.
+    generator = torch.Generator('cuda' if torch.cuda.is_available() else 'cpu').manual_seed(seed) if seed != 0 else None
+    prompt = f"{prefix} {prompt}" if auto_prefix else prompt
+
+    try:
+        if img is not None:
+            return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
+        else:
+            return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
+    except Exception as e:
+        return None, error_str(e)
+
+def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
+    result = pipe(
+        prompt,
+        negative_prompt=neg_prompt,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        width=width,
+        height=height,
+        generator=generator)
+
+    return result.images[0]
+
+def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+    # Fit the init image inside the requested dimensions while preserving its aspect ratio.
+    ratio = min(height / img.height, width / img.width)
+    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+    result = pipe_i2i(
+        prompt,
+        negative_prompt=neg_prompt,
+        init_image=img,
+        num_inference_steps=int(steps),
+        strength=strength,
+        guidance_scale=guidance,
+        width=width,
+        height=height,
+        generator=generator)
+
+    return result.images[0]
+
+css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
+"""
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        f"""
+            <div class="main-div">
+              <div>
+                <h1>Woolitize 768sd1 5</h1>
+              </div>
+              <p>
+                Demo for <a href="https://huggingface.co/plasmo/woolitize-768sd1-5">Woolitize 768sd1 5</a> Stable Diffusion model.<br>
+                {"Add the following tokens to your prompts for the model to work properly: <b>prefix</b>" if prefix else ""}
+              </p>
+              Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/akhaliq/woolitize-768sd1-5/settings'>Settings</a></b>"}<br><br>
+              <a style="display:inline-block" href="https://huggingface.co/spaces/akhaliq/woolitize-768sd1-5?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+            </div>
+        """
+    )
+    with gr.Row():
+
+        with gr.Column(scale=55):
+            with gr.Group():
+                with gr.Row():
+                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder=f"{prefix} [your prompt]").style(container=False)
+                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
+
+                image_out = gr.Image(height=512)
+            error_output = gr.Markdown()
+
+        with gr.Column(scale=45):
+            with gr.Tab("Options"):
+                with gr.Group():
+                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
+                    auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=prefix, visible=prefix)
+
+                    with gr.Row():
+                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
+
+                    with gr.Row():
+                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
+                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
+
+                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+            with gr.Tab("Image to image"):
+                with gr.Group():
+                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
+                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
+
+    auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
+
+    inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
+    outputs = [image_out, error_output]
+    prompt.submit(inference, inputs=inputs, outputs=outputs)
+    generate.click(inference, inputs=inputs, outputs=outputs)
+
+    gr.HTML("""
+    <div style="border-top: 1px solid #303030;">
+      <br>
+      <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
+    </div>
+    """)
+
+demo.queue(concurrency_count=1)
+demo.launch()
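For reference, a minimal sketch of driving the same checkpoint outside the Gradio app (assuming a recent torch/diffusers install as listed in requirements.txt; the prompt and output filename below are only illustrative, not taken from the Space):

# Standalone sketch (not part of the Space): run one text-to-image pass with the
# same checkpoint and scheduler that app.py loads. The prompt and the output
# filename are illustrative placeholders.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "plasmo/woolitize-768sd1-5"
scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    scheduler=scheduler,
)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

image = pipe(
    "woolitize, cozy cabin in a snowy forest",
    num_inference_steps=25,
    guidance_scale=7.5,
    width=512,
    height=512,
).images[0]
image.save("woolitize-sample.png")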
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+--extra-index-url https://download.pytorch.org/whl/cu113
+torch
+diffusers
+#transformers
+git+https://github.com/huggingface/transformers
+accelerate
+ftfy
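As a rough post-install sanity check (a sketch, not part of the Space), the packages listed above can be imported and the CUDA status that app.py branches on can be printed:

# Quick environment check (illustrative): confirm the requirements import and
# report whether CUDA is visible, which decides fp16 vs fp32 in app.py.
import torch
import diffusers
import transformers
import accelerate
import ftfy

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("diffusers", diffusers.__version__)
print("transformers", transformers.__version__)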