Mohammad Ibrahim committed on
Commit 84bfc2b
1 Parent(s): fdadd50
Added all files
Browse files
- Depth/learned_embeds.bin +3 -0
- Depth/mouseseed164.png +0 -0
- Depth/mouseseed64bright.png +0 -0
- First.ipynb +0 -0
- Jerry mouse/learned_embeds.bin +3 -0
- Jerry mouse/mouseseed64.png +0 -0
- Jerry mouse/mouseseed64bright.png +0 -0
- Learned.ipynb +0 -0
- Mobius/learned_embeds.bin +3 -0
- Mobius/mouseseed184.png +0 -0
- Mobius/mouseseed184bright.png +0 -0
- Oil paint/learned_embeds.bin +3 -0
- Oil paint/mouseseed32.png +0 -0
- Oil paint/mouseseed32bright.png +0 -0
- Polygon/learned_embeds.bin +3 -0
- Polygon/mouseseed204.png +0 -0
- Polygon/mouseseed204bright.png +0 -0
- Stable Diffusion Deep Dive.ipynb +0 -0
- app.py +269 -0
- requirements.txt +8 -0
Depth/learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e23ba9437d8ecc4acce4dae08118272855db108e24feaaf5d428df88661335de
+size 3819
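Each learned_embeds.bin here is a Git LFS pointer to a textual-inversion embedding: loaded with torch.load, it is a small dict mapping a placeholder token (for this folder, '<depthmap>', per app.py below) to a learned CLIP token-embedding vector. A minimal sketch of inspecting one, assuming the file has been pulled from LFS:

import torch

embeds = torch.load('Depth/learned_embeds.bin', map_location='cpu')
for token, vec in embeds.items():
    # Expect something like: <depthmap> (768,)
    print(token, tuple(vec.shape))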
Depth/mouseseed164.png
ADDED
Depth/mouseseed64bright.png
ADDED
First.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
Jerry mouse/learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbdcc3699f3ad8b464e63e1360d5acdd0d5fdf97f74c06962dbb08e60fb576ff
+size 3840
Jerry mouse/mouseseed64.png
ADDED
Jerry mouse/mouseseed64bright.png
ADDED
Learned.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
Mobius/learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff8da3d4cbeb09ee7e0a6ef8b0dc647b31c23580678ec2f5eee0e8d5f087c29d
+size 3819
Mobius/mouseseed184.png
ADDED
Mobius/mouseseed184bright.png
ADDED
Oil paint/learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:754d7d9c1fcdc7e05fd273f21e77b05bc89a4ba25415d24de1286f1fbdf9e0c7
+size 3840
Oil paint/mouseseed32.png
ADDED
Oil paint/mouseseed32bright.png
ADDED
Polygon/learned_embeds.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b11448e8c3071bdf93ca73a5b1406ae26baae73e3962cb2c2222241294cae06c
+size 3904
Polygon/mouseseed204.png
ADDED
Polygon/mouseseed204bright.png
ADDED
Stable Diffusion Deep Dive.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
app.py
ADDED
@@ -0,0 +1,269 @@
+from base64 import b64encode
+
+import numpy
+import os
+import torch
+import gradio as gr
+from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
+from huggingface_hub import notebook_login
+
+# For video display:
+from IPython.display import HTML
+from matplotlib import pyplot as plt
+from pathlib import Path
+from PIL import Image
+from torch import autocast
+from torchvision import transforms as tfms
+from tqdm.auto import tqdm
+from transformers import CLIPTextModel, CLIPTokenizer, logging
+
+# os.environ['HF_HOME'] = '/raid/users/mohammadibrahim-st/ModelCache'
+torch.manual_seed(1)
+if not (Path.home()/'.cache/huggingface'/'token').exists(): notebook_login()
+
+# Suppress some unnecessary warnings when loading the CLIPTextModel
+logging.set_verbosity_error()
+
+# Set device
+torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+if "mps" == torch_device: os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"
+
+# Load the autoencoder model, which will be used to decode the latents into image space.
+vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
+
+# Load the tokenizer and text encoder to tokenize and encode the text.
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
+text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+
+# The UNet model for generating the latents.
+unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
+
+# The noise scheduler
+scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
+
+# To the GPU we go!
+vae = vae.to(torch_device)
+text_encoder = text_encoder.to(torch_device)
+unet = unet.to(torch_device)
+
+def pil_to_latent(input_im):
+    # Single image -> single latent in a batch (so shape 1, 4, 64, 64)
+    with torch.no_grad():
+        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device)*2-1)  # Note scaling to (-1, 1)
+    return 0.18215 * latent.latent_dist.sample()
+
+def latents_to_pil(latents):
+    # Batch of latents -> list of PIL images
+    latents = (1 / 0.18215) * latents
+    with torch.no_grad():
+        image = vae.decode(latents).sample
+    image = (image / 2 + 0.5).clamp(0, 1)
+    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
+    images = (image * 255).round().astype("uint8")
+    pil_images = [Image.fromarray(image) for image in images]
+    return pil_images
+
+def set_timesteps(scheduler, num_inference_steps):
+    scheduler.set_timesteps(num_inference_steps)
+    scheduler.timesteps = scheduler.timesteps.to(torch.float32)
+
+def brightness_loss(images, target_brightness):
+    # Convert images to grayscale to estimate brightness, then penalise
+    # the mean absolute deviation from the target brightness.
+    grayscale_images = images.mean(dim=1, keepdim=True)
+    error = torch.abs(grayscale_images - target_brightness).mean()
+    return error
+
+def generate_with_embs(text_input, text_embeddings, blossval):
+    # blossval is the target brightness (0-1) chosen in the UI slider.
+    height = 512                        # default height of Stable Diffusion
+    width = 512                         # default width of Stable Diffusion
+    num_inference_steps = 30            # Number of denoising steps
+    guidance_scale = 7.5                # Scale for classifier-free guidance
+    generator = torch.manual_seed(164)  # Seed generator to create the initial latent noise
+    batch_size = 1
+    brightness_loss_scale = 200         # Weight of the brightness-guidance loss
+
+    max_length = text_input.input_ids.shape[-1]
+    uncond_input = tokenizer(
+        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+    )
+    with torch.no_grad():
+        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
+    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+    # Prep scheduler
+    set_timesteps(scheduler, num_inference_steps)
+
+    # Prep latents
+    latents = torch.randn(
+        (batch_size, unet.config.in_channels, height // 8, width // 8),
+        generator=generator,
+    )
+    latents = latents.to(torch_device)
+    latents = latents * scheduler.init_noise_sigma
+
+    # Sampling loop
+    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
+        # Expand the latents so classifier-free guidance needs only one forward pass.
+        latent_model_input = torch.cat([latents] * 2)
+        sigma = scheduler.sigmas[i]
+        latent_model_input = scheduler.scale_model_input(latent_model_input, t)
+
+        # Predict the noise residual
+        with torch.no_grad():
+            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+        # Perform classifier-free guidance
+        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+        # Every 5 steps, nudge the latents towards the target brightness.
+        if i % 5 == 0:
+            # Requires grad on the latents
+            latents = latents.detach().requires_grad_()
+
+            # Get the predicted x0:
+            latents_x0 = latents - sigma * noise_pred
+            # latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
+
+            # Decode to image space, range (0, 1)
+            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5
+
+            # Calculate loss
+            loss = brightness_loss(denoised_images, blossval) * brightness_loss_scale
+
+            # Occasionally print it out
+            if i % 10 == 0:
+                print(i, 'loss:', loss.item())
+
+            # Get gradient
+            cond_grad = torch.autograd.grad(loss, latents)[0]
+
+            # Modify the latents based on this gradient
+            latents = latents.detach() - cond_grad * sigma**2
+
+        # Now step with the scheduler: compute the previous noisy sample x_t -> x_t-1
+        latents = scheduler.step(noise_pred, t, latents).prev_sample
+    return latents_to_pil(latents)[0]
+
+def build_causal_attention_mask(bsz, seq_len, dtype):
+    # Lazily create a causal attention mask; PyTorch uses an additive
+    # attention mask, so fill with -inf and zero out the lower triangle.
+    mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
+    mask.fill_(torch.finfo(dtype).min)
+    mask.triu_(1)  # zero out the lower diagonal
+    mask = mask.unsqueeze(1)  # expand mask
+    return mask
+
+def get_output_embeds(input_embeddings):
+    # CLIP's text model uses a causal mask, so we prepare it here:
+    bsz, seq_len = input_embeddings.shape[:2]
+    causal_attention_mask = build_causal_attention_mask(bsz, seq_len, dtype=input_embeddings.dtype)
+
+    # Getting the output embeddings involves calling the model with output_hidden_states=True
+    # so that it doesn't just return the pooled final predictions:
+    encoder_outputs = text_encoder.text_model.encoder(
+        inputs_embeds=input_embeddings,
+        attention_mask=None,  # We aren't using an attention mask, so that can be None
+        causal_attention_mask=causal_attention_mask.to(torch_device),
+        output_attentions=None,
+        output_hidden_states=True,  # We want the output embeddings, not the final output
+        return_dict=None,
+    )
+
+    # We're interested in the output hidden state only
+    output = encoder_outputs[0]
+
+    # There is a final layer norm we need to pass these through
+    output = text_encoder.text_model.final_layer_norm(output)
+
+    # And now they're ready!
+    return output
+
+# Construct the embedding paths relative to this script
+current_directory = os.path.dirname(__file__)
+
+birb_embed = torch.load(os.path.join(current_directory, 'Depth', 'learned_embeds.bin'))
+birb_embedjerry = torch.load(os.path.join(current_directory, 'Jerry mouse', 'learned_embeds.bin'))
+birb_embedmobius = torch.load(os.path.join(current_directory, 'Mobius', 'learned_embeds.bin'))
+birb_embedoilpaint = torch.load(os.path.join(current_directory, 'Oil paint', 'learned_embeds.bin'))
+birb_embedpolygon = torch.load(os.path.join(current_directory, 'Polygon', 'learned_embeds.bin'))
+
+def generate_image(prompt, selected_embedding, blossval):
+    # Map the selected style to its embedding file and placeholder token
+    embedding_dict = {
+        "Depth": (birb_embed, '<depthmap>'),
+        "Jerry mouse": (birb_embedjerry, '<jerrymouse>'),
+        "Mobius": (birb_embedmobius, '<moebius>'),
+        "Oil paint": (birb_embedoilpaint, 'oil_style'),
+        "Polygon": (birb_embedpolygon, '<low-poly-hd-logos-icons>')
+    }
+
+    token_emb_layer = text_encoder.text_model.embeddings.token_embedding
+    pos_emb_layer = text_encoder.text_model.embeddings.position_embedding
+    position_ids = text_encoder.text_model.embeddings.position_ids[:, :77]
+    position_embeddings = pos_emb_layer(position_ids)
+
+    # Tokenize
+    text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
+    input_ids = text_input.input_ids.to(torch_device)
+
+    # Get token embeddings
+    token_embeddings = token_emb_layer(input_ids)
+
+    # Select the appropriate learned embedding and key based on user input
+    selected_embedding_file, embedding_key = embedding_dict[selected_embedding]
+    replacement_token_embedding = selected_embedding_file[embedding_key].to(torch_device)
+
+    # Insert this into the token embeddings; 6829 is the CLIP token id for "puppy",
+    # which is why the prompt must contain that word.
+    token_embeddings[0, torch.where(input_ids[0] == 6829)] = replacement_token_embedding
+
+    # Combine with position embeddings
+    input_embeddings = token_embeddings + position_embeddings
+
+    # Feed through to get the final output embeddings
+    modified_output_embeddings = get_output_embeds(input_embeddings)
+
+    # Generate an image with this and return it
+    generated_image = generate_with_embs(text_input, modified_output_embeddings, blossval)
+    return generated_image
+
+# Define options for the dropdown
+embedding_options = ["Depth", "Jerry mouse", "Mobius", "Oil paint", "Polygon"]
+
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=generate_image,
+    inputs=[
+        "text",
+        gr.Dropdown(choices=embedding_options, label="Select Style"),
+        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Adjust brightness loss for the image (higher means a brighter image)")
+    ],
+    outputs="image",
+    title="Image Generation App (Please use the word 'puppy' in the prompt)"
+)
+
+iface.launch(share=True)
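For context, generate_image swaps the selected style embedding into the prompt wherever token id 6829 occurs, which is why the interface title asks for the word 'puppy'. A quick sanity check of that assumption, using the same CLIP tokenizer app.py loads (token ids could differ in other tokenizer versions):

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
print(tokenizer("a puppy in depth map style").input_ids)  # 6829 should appear once, for "puppy"
print(tokenizer.decode([6829]))                           # expected output: "puppy"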
requirements.txt
ADDED
@@ -0,0 +1,11 @@
+torch
+torchvision
+diffusers
+huggingface_hub
+transformers
+gradio
+numpy
+Pillow
+tqdm
+IPython
+matplotlib
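A sketch of running the app locally from a fresh checkout; this assumes git-lfs is installed (so the learned_embeds.bin pointers above resolve to real files) and a CUDA or MPS machine:

git lfs pull
pip install -r requirements.txt
python app.py   # launches the Gradio UI; share=True also prints a public URL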