+ nsfw stub
app.py CHANGED
@@ -51,11 +51,11 @@ def inference(model, img, strength, prompt, neg_prompt, guidance, steps, width, height, seed=0):
   generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None

   if img is not None:
-    return txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
+    return img_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
   else:
-    return img_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator)
+    return txt_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator)

-def img_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator=None):
+def txt_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator=None):

   global current_model
   global pipe
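Alongside the nsfw stub, this hunk untangles swapped function names: after the change, img_to_img is the function that actually accepts an init image, and the dispatch in inference calls each path by its proper name. The dispatch hinges on the seeded generator built one line above; a fixed seed makes sampling reproducible because the generator state determines the initial latents. A minimal sketch of that property (assumes a CUDA device, as the Space does):

import torch

# The same manual seed yields the same generator state, hence the same
# "random" draws -- which is why seed != 0 gives reproducible images.
gen_a = torch.Generator('cuda').manual_seed(42)
gen_b = torch.Generator('cuda').manual_seed(42)
assert torch.equal(torch.randn(4, device='cuda', generator=gen_a),
                   torch.randn(4, device='cuda', generator=gen_b))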
@@ -71,18 +71,19 @@ def img_to_img(model, prompt, neg_prompt, guidance, steps, width, height, generator=None):
   pipe = pipe.to("cuda")

   prompt = prompt_prefixes[current_model] + prompt
-  image = pipe(
+  results = pipe(
     prompt,
     negative_prompt=neg_prompt,
     num_inference_steps=int(steps),
     guidance_scale=guidance,
     width=width,
     height=height,
-    generator=generator).images[0]
+    generator=generator)

+  image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
   return image

-def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+def img_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):

   global current_model
   global pipe
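This is the nsfw stub the commit title refers to: instead of indexing .images[0] straight off the pipeline call, the output object is kept as results so its nsfw_content_detected flags can be consulted, and a local nsfw.png placeholder is returned when the safety checker fires. A standalone sketch of the same pattern (assumes a diffusers StableDiffusionPipeline, whose output carried an nsfw_content_detected list at the time; the model id and prompt here are placeholders):

import torch
from PIL import Image
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")

results = pipe("a watercolor fox", num_inference_steps=25)
# One boolean per generated image; None if the safety checker is disabled.
flagged = results.nsfw_content_detected and results.nsfw_content_detected[0]
image = Image.open("nsfw.png") if flagged else results.images[0]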
@@ -100,7 +101,7 @@ def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
   prompt = prompt_prefixes[current_model] + prompt
   ratio = min(height / img.height, width / img.width)
   img = img.resize((int(img.width * ratio), int(img.height * ratio)))
-  image = pipe(
+  results = pipe(
     prompt,
     negative_prompt=neg_prompt,
     init_image=img,
@@ -109,8 +110,9 @@ def txt_to_img(model, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
     guidance_scale=guidance,
     width=width,
     height=height,
-    generator=generator).images[0]
-
+    generator=generator)
+
+  image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
   return image
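The ratio = min(...) line above scales the init image to fit inside the requested width x height box while preserving aspect ratio: the tighter of the two constraints wins. Fitting 800x600 into 512x512, for instance, gives ratio = min(512/600, 512/800) = 0.64 and a 512x384 result. A standalone sketch (fit_within is a hypothetical name for illustration):

from PIL import Image

def fit_within(img: Image.Image, width: int, height: int) -> Image.Image:
    # Scale by the most restrictive dimension so the result fits the box.
    ratio = min(height / img.height, width / img.width)
    return img.resize((int(img.width * ratio), int(img.height * ratio)))

src = Image.new("RGB", (800, 600))
assert fit_within(src, 512, 512).size == (512, 384)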
@@ -159,6 +161,9 @@ with gr.Blocks(css=css) as demo:
       with gr.Column():
         model = gr.Dropdown(label="Model", choices=models, value=models[0])
         prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
+        run = gr.Button(value="Run")
+        gr.Markdown(f"Running on: {device}")
+
         with gr.Tab("Options"):

           neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
@@ -173,8 +178,6 @@ with gr.Blocks(css=css) as demo:

       with gr.Column():
         image_out = gr.Image(height=512)
-        run = gr.Button(value="Run")
-        gr.Markdown(f"Running on: {device}")

   inputs = [model, image, strength, prompt, neg_prompt, guidance, steps, width, height, seed]
   prompt.submit(inference, inputs=inputs, outputs=image_out)
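These two hunks move the Run button and the device note out of the output column and up under the prompt box. Only the prompt.submit wiring appears here; a matching run.click(...) with the same inputs and outputs would be needed elsewhere for the button to do anything, as in this minimal Blocks sketch (layout and handler simplified):

import gradio as gr

def echo(text):
    return text

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button(value="Run")
    out = gr.Textbox(label="Output")

    # Enter in the textbox and a button click both fire the same handler.
    prompt.submit(echo, inputs=[prompt], outputs=out)
    run.click(echo, inputs=[prompt], outputs=out)

demo.launch()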