Spaces: Running on Zero
Create app.py
app.py ADDED
from __future__ import annotations
import uuid
import os
import math
import random
import spaces
import gradio as gr
import torch
from PIL import Image, ImageOps
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from huggingface_hub import InferenceClient

help_text = """
To optimize image editing results:
- Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
- Modify the **Text CFG weight** to influence how closely the edit follows text instructions. Increase it to adhere more to the text, or decrease it for subtler changes.
- Experiment with different **random seeds** and **CFG values** for varied outcomes.
- **Rephrase your instructions** for potentially better results.
- **Increase the number of steps** for enhanced edits.
- For better facial details, especially if they're small, **crop the image** to enlarge the face's presence.
"""

model_id = "timbrooks/instruct-pix2pix"

DESCRIPTION = ""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"

MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Text-to-image pipeline (SDXL Flash) used for the "Image Generation" task.
if torch.cuda.is_available():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "sd-community/sdxl-flash",
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, 999999)
    return seed


def resize_image(image, output_size=(512, 512)):
    # Calculate aspect ratios
    target_aspect = output_size[0] / output_size[1]  # aspect ratio of the desired size
    image_aspect = image.width / image.height  # aspect ratio of the original image

    # Resize, then center-crop to the target size.
    if image_aspect > target_aspect:
        new_height = output_size[1]
        new_width = int(new_height * image_aspect)
        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
        left = (new_width - output_size[0]) / 2
        top = 0
        right = (new_width + output_size[0]) / 2
        bottom = output_size[1]
    else:
        new_width = output_size[0]
        new_height = int(new_width / image_aspect)
        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
        left = 0
        top = (new_height - output_size[1]) / 2
        right = output_size[0]
        bottom = (new_height + output_size[1]) / 2
    cropped_image = resized_image.crop((left, top, right, bottom))
    return cropped_image


# Image-editing pipeline (InstructPix2Pix) used for the "Image Editing" task.
pipe2 = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, safety_checker=None
).to("cuda")


@spaces.GPU(duration=30, queue=False)
def king(
    type="Image Editing",
    input_image=None,
    instruction: str = "Eiffel tower",
    steps: int = 8,
    randomize_seed: bool = False,
    seed: int = 24,
    text_cfg_scale: float = 7.3,
    image_cfg_scale: float = 1.7,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
):
    if type == "Image Generation":
        # Text-to-image with SDXL Flash.
        pipe.to(device)
        seed = int(randomize_seed_fn(seed, randomize_seed))
        generator = torch.Generator().manual_seed(seed)

        options = {
            "prompt": instruction,
            "width": width,
            "height": height,
            "guidance_scale": guidance_scale,
            "num_inference_steps": steps,
            "generator": generator,
            "use_resolution_binning": use_resolution_binning,
            "output_type": "pil",
        }

        output_image = pipe(**options).images[0]
        return seed, output_image
    else:
        # Instruction-based editing with InstructPix2Pix.
        seed = int(randomize_seed_fn(seed, randomize_seed))
        steps = steps * 6  # editing needs more denoising steps than SDXL Flash
        generator = torch.manual_seed(seed)
        output_image = pipe2(
            instruction,
            image=input_image,
            guidance_scale=text_cfg_scale,
            image_guidance_scale=image_cfg_scale,
            num_inference_steps=steps,
            generator=generator,
        ).images[0]
        return seed, output_image


def response(instruction, input_image=None):
    # Classify the instruction as "Image Generation" or "Image Editing" with a
    # small Mixtral call; default to generation when no input image is given.
    client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

    generate_kwargs = dict(
        max_new_tokens=5,
    )

    system = "[SYSTEM] You will be provided with text, and your task is to classify whether the task is image generation or image editing. Answer with only the task, do not say anything else, and stop as soon as possible. [TEXT]"

    formatted_prompt = system + instruction + "[TASK]"
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for chunk in stream:
        if not chunk.token.text == "</s>":
            output += chunk.token.text

    if input_image is None:
        output = "Image Generation"

    if "editing" in output:
        output = "Image Editing"
    else:
        output = "Image Generation"
    return output


css = '''
.gradio-container{max-width: 600px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

with gr.Blocks(css=css) as demo:
    gr.Markdown("# Image Generator Pro")
    with gr.Row():
        with gr.Column(scale=4):
            instruction = gr.Textbox(lines=1, label="Instruction", interactive=True)
        with gr.Column(scale=1):
            generate_button = gr.Button("Generate")
        with gr.Column(scale=1):
            # Hidden task selector; it is set automatically by response().
            type = gr.Dropdown(
                ["Image Generation", "Image Editing"],
                label="Task",
                value="Image Generation",
                interactive=True,
                visible=False,
            )

    with gr.Row():
        input_image = gr.Image(label="Image", type="pil", interactive=True)

    with gr.Row():
        text_cfg_scale = gr.Number(value=7.3, step=0.1, label="Text CFG", interactive=True)
        image_cfg_scale = gr.Number(value=1.7, step=0.1, label="Image CFG", interactive=True)
        steps = gr.Number(value=8, precision=0, label="Steps", interactive=True)
        randomize_seed = gr.Radio(
            ["Fix Seed", "Randomize Seed"],
            value="Randomize Seed",
            type="index",
            show_label=False,
            interactive=True,
        )
        seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True)

    gr.Markdown(help_text)

    # Re-classify the task whenever the instruction changes.
    instruction.change(fn=response, inputs=[instruction, input_image], outputs=type, queue=False)

    generate_button.click(
        fn=king,
        inputs=[
            type,
            input_image,
            instruction,
            steps,
            randomize_seed,
            seed,
            text_cfg_scale,
            image_cfg_scale,
        ],
        outputs=[seed, input_image],
        queue=False,
    )

demo.queue(max_size=99999).launch()
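
For reference, the editing branch of king() can be exercised outside Gradio with the same diffusers pipeline. The sketch below is a minimal example under stated assumptions: a CUDA GPU with enough memory for fp16 InstructPix2Pix, and local files "input.jpg" / "edited.png", which are placeholder names not part of the Space. The values mirror the app's UI defaults (Text CFG 7.3, Image CFG 1.7, Steps 8 multiplied by 6 inside king()).

# Standalone sketch of the app's editing path (assumptions: CUDA GPU available;
# "input.jpg" and "edited.png" are placeholder file names, not part of the app).
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

pix2pix = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")

image = Image.open("input.jpg").convert("RGB")
generator = torch.manual_seed(24)  # the app's default seed

edited = pix2pix(
    "make it snowy",                # instruction, analogous to the Instruction textbox
    image=image,
    guidance_scale=7.3,             # Text CFG default in the UI
    image_guidance_scale=1.7,       # Image CFG default in the UI
    num_inference_steps=48,         # the app multiplies the Steps value (8) by 6
    generator=generator,
).images[0]
edited.save("edited.png")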