Update app.py
app.py CHANGED
@@ -5,7 +5,6 @@ model_1 = gr.load("models/pimpilikipilapi1/NSFW_master")
 model_2 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
 model_3 = gr.load("models/prashanth970/flux-lora-uncensored")
 
-
 default_negative_prompt = (
     "Extra limbs, Extra fingers or toes, Disfigured face, Distorted hands, Mutated body parts, "
     "Missing limbs, Asymmetrical features, Blurry face, Poor anatomy, Incorrect proportions, Crooked eyes, "
@@ -19,32 +18,24 @@ default_negative_prompt = (
 )
 
 
-def generate_image_model_1(prompt, negative_prompt):
-
+def generate_image(model, prompt, negative_prompt):
+    """Generate image using the given model."""
+    prompt += " high-resolution"
     try:
-        return model_1(prompt, negative_prompt=negative_prompt)
+        output = model(prompt, negative_prompt=negative_prompt)
     except TypeError:
-        return model_1(prompt)
+        output = model(prompt)
 
-def generate_image_model_2(prompt, negative_prompt):
-
-    try:
-        return model_2(prompt, negative_prompt=negative_prompt)
-    except TypeError:
-        return model_2(prompt)
+    if isinstance(output, tuple) and len(output) > 0:
+        output = output[0]
+    return output
 
-def generate_image_model_3(prompt, negative_prompt):
-    prompt += " 10k"
-    try:
-        return model_3(prompt, negative_prompt=negative_prompt)
-    except TypeError:
-        return model_3(prompt)
 
 interface = gr.Interface(
     fn=lambda prompt, negative_prompt: (
-        generate_image_model_1(prompt, negative_prompt),
-        generate_image_model_2(prompt, negative_prompt),
-        generate_image_model_3(prompt, negative_prompt),
+        generate_image(model_1, prompt, negative_prompt),
+        generate_image(model_2, prompt, negative_prompt),
+        generate_image(model_3, prompt, negative_prompt),
     ),
     inputs=[
         gr.Textbox(label="Type your prompt here: ✍️", placeholder="Describe what you want..."),
@@ -56,8 +47,7 @@ interface = gr.Interface(
         gr.Image(label="Generated Image - Model 3"),
     ],
     title="Text to Image (NSFW) 🔞",
-    theme="NoCrypt/miku",
     description="⚠️ Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
 )
 
-interface.launch()
+interface.launch()
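
For reference, here is a minimal, self-contained sketch of what the consolidated helper does. The stand-in callables (fake_model_with_negative, fake_model_positional_only) are hypothetical and exist only so the TypeError fallback and tuple unwrapping can be exercised without gr.load() downloading anything; the real app passes the loaded model objects instead.

# Sketch only: the "models" below are plain functions invented for illustration.

def generate_image(model, prompt, negative_prompt):
    """Generate an image with `model`, falling back if it rejects negative_prompt."""
    prompt += " high-resolution"
    try:
        output = model(prompt, negative_prompt=negative_prompt)
    except TypeError:
        # Some loaded models only accept the positional prompt.
        output = model(prompt)

    # Some models return a tuple such as (image, metadata); keep only the image.
    if isinstance(output, tuple) and len(output) > 0:
        output = output[0]
    return output


def fake_model_with_negative(prompt, negative_prompt):
    return f"image<{prompt}> avoiding<{negative_prompt}>"


def fake_model_positional_only(prompt):
    return (f"image<{prompt}>", {"seed": 42})


print(generate_image(fake_model_with_negative, "a lighthouse at dusk", "blurry, low quality"))
print(generate_image(fake_model_positional_only, "a lighthouse at dusk", "blurry, low quality"))

The lambda passed to gr.Interface simply maps each of the three loaded models through this one helper, which is why the three near-duplicate generate_image_model_* functions could be removed.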