Commit 0934670 (parent: c9bf0bf): Update app.py

app.py CHANGED
@@ -204,11 +204,56 @@ class ImStack(nn.Module):
 
 
 
-def generate(text,
-
-
-
-
+def generate(text, n_iter):
+
+    lr=0.25 #@param
+    # init_image=None #@param
+    weight_decay=1e-5 #@param
+    out_size=540 #@param
+    base_size=20 #@param
+    n_layers=4 #@param
+    scale=3 #@param
+
+
+
+    p_prompts = []
+    embed = perceptor.encode_text(clip.tokenize(text).to(device)).float()
+    p_prompts.append(Prompt(embed, 1, float('-inf')).to(device)) # 1 is the weight
+
+    # Some negative prompts
+    n_prompts = []
+    for pr in ["Random noise", 'saturated rainbow RGB deep dream']:
+        embed = perceptor.encode_text(clip.tokenize(pr).to(device)).float()
+        n_prompts.append(Prompt(embed, 0.5, float('-inf')).to(device)) # 0.5 is the weight
+
+    # The ImageStack - trying a different scale and n_layers
+    ims = ImStack(base_size=base_size, scale=scale, n_layers=n_layers, out_size=out_size, decay=0.4)
+
+    optimizer = optim.Adam(ims.layers, lr=lr, weight_decay=weight_decay)
+    losses = []
+
+    for i in range(n_iter):
+        optimizer.zero_grad()
+
+        if i < 15: # Save time by skipping the cutouts and focusing on the lower layers
+            im = ims.preview(n_preview=1 + i//20)
+            iii = perceptor.encode_image(normalize(im)).float()
+        else:
+            im = ims()
+            iii = perceptor.encode_image(normalize(make_cutouts(im))).float()
+
+        l = 0
+        for prompt in p_prompts:
+            l += prompt(iii)
+        for prompt in n_prompts:
+            l -= prompt(iii)
+
+        losses.append(float(l.detach().cpu()))
+        l.backward() # Backprop
+        optimizer.step() # Update
+
+    im = ims.to_pil()
+    return np.array(im)
 
 iface = gr.Interface(fn=generate,
                      inputs=[
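The hunk ends mid-call at `inputs=[`, so the rest of the interface wiring (input widgets, output component, launch) is not visible in this diff. Purely as a hypothetical sketch, assuming the `generate(text, n_iter)` signature added above and current Gradio component names, and not the Space's actual widgets, labels, or defaults, the call might be completed along these lines:

# Hypothetical completion, not part of the commit above: the diff cuts off at
# `inputs=[`, so every widget, label, and default below is an assumption.
import gradio as gr

iface = gr.Interface(
    fn=generate,                                # generate(text, n_iter) from the diff above
    inputs=[
        gr.Textbox(label="Prompt"),             # -> text
        gr.Number(value=60, precision=0,
                  label="Iterations"),          # -> n_iter; precision=0 yields an int
    ],
    outputs=gr.Image(label="Generated image"),  # generate() returns a numpy array
)

iface.launch()

Whatever the real widgets are, `n_iter` needs to arrive as an integer, since the new code passes it straight to `range()`; `precision=0` on a number input (or an explicit `int()` cast inside `generate`) is one way to guarantee that.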