lunarring committed on
Commit
4e72de9
1 Parent(s): cc36fba

Delete app.py

Files changed (1)
  1. app.py +0 -492
app.py DELETED
@@ -1,492 +0,0 @@
- # Copyright 2022 Lunar Ring. All rights reserved.
- # Written by Johannes Stelzer, email stelzer@lunar-ring.ai twitter @j_stelzer
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import os
- import torch
- torch.backends.cudnn.benchmark = False
- torch.set_grad_enabled(False)
- import numpy as np
- import warnings
- warnings.filterwarnings('ignore')
- from tqdm.auto import tqdm
- from PIL import Image
- from movie_util import MovieSaver, concatenate_movies
- from latent_blending import LatentBlending
- from stable_diffusion_holder import StableDiffusionHolder
- import gradio as gr
- from dotenv import find_dotenv, load_dotenv
- import shutil
- import random
- from utils import get_time, add_frames_linear_interp
- from huggingface_hub import hf_hub_download
-
-
- class BlendingFrontend():
-     def __init__(
-             self,
-             sdh,
-             share=False):
-         r"""
-         Gradio helper class to collect UI data and start latent blending.
-         Args:
-             sdh:
-                 StableDiffusionHolder
-             share: bool
-                 Set True to get a shareable gradio link (e.g. for running a remote server)
-         """
-         self.share = share
-
-         # UI defaults
-         self.num_inference_steps = 30
-         self.depth_strength = 0.25
-         self.seed1 = 420
-         self.seed2 = 420
-         self.prompt1 = ""
-         self.prompt2 = ""
-         self.negative_prompt = ""
-         self.fps = 30
-         self.duration_video = 8
-         self.t_compute_max_allowed = 10
-
-         self.lb = LatentBlending(sdh)
-         self.lb.sdh.num_inference_steps = self.num_inference_steps
-         self.init_parameters_from_lb()
-         self.init_save_dir()
-
-         # State variables
-         self.list_fp_imgs_current = []
-         self.recycle_img1 = False
-         self.recycle_img2 = False
-         self.list_all_segments = []
-         self.dp_session = ""
-         self.user_id = None
-
-     def init_parameters_from_lb(self):
-         r"""
-         Automatically initializes parameters from the LatentBlending instance.
-         """
-         self.height = self.lb.sdh.height
-         self.width = self.lb.sdh.width
-         self.guidance_scale = self.lb.guidance_scale
-         self.guidance_scale_mid_damper = self.lb.guidance_scale_mid_damper
-         self.mid_compression_scaler = self.lb.mid_compression_scaler
-         self.branch1_crossfeed_power = self.lb.branch1_crossfeed_power
-         self.branch1_crossfeed_range = self.lb.branch1_crossfeed_range
-         self.branch1_crossfeed_decay = self.lb.branch1_crossfeed_decay
-         self.parental_crossfeed_power = self.lb.parental_crossfeed_power
-         self.parental_crossfeed_range = self.lb.parental_crossfeed_range
-         self.parental_crossfeed_power_decay = self.lb.parental_crossfeed_power_decay
-
-     def init_save_dir(self):
-         r"""
-         Initializes the directory where outputs are saved.
-         You can specify this directory in a ".env" file in your latentblending root, setting
-         DIR_OUT='/path/to/saving'
-         """
-         load_dotenv(find_dotenv(), verbose=False)
-         self.dp_out = os.getenv("DIR_OUT")
-         if self.dp_out is None:
-             self.dp_out = ""
-         self.dp_imgs = os.path.join(self.dp_out, "imgs")
-         os.makedirs(self.dp_imgs, exist_ok=True)
-         self.dp_movies = os.path.join(self.dp_out, "movies")
-         os.makedirs(self.dp_movies, exist_ok=True)
-         self.save_empty_image()
-
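The DIR_OUT lookup above can be exercised in isolation; a minimal sketch, assuming a one-line ".env" file in the latentblending root (the path is a placeholder, not from this repo):

    # Hypothetical .env contents:
    #   DIR_OUT='/path/to/saving'
    import os
    from dotenv import find_dotenv, load_dotenv
    load_dotenv(find_dotenv(), verbose=False)
    print(os.getenv("DIR_OUT") or "")  # falls back to "" when unset, as above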
-     def save_empty_image(self):
-         r"""
-         Saves an empty/black dummy image.
-         """
-         self.fp_img_empty = os.path.join(self.dp_imgs, 'empty.jpg')
-         Image.fromarray(np.zeros((self.height, self.width, 3), dtype=np.uint8)).save(self.fp_img_empty, quality=5)
-
-     def randomize_seed1(self):
-         r"""
-         Randomizes the first seed.
-         """
-         seed = np.random.randint(0, 10000000)
-         self.seed1 = int(seed)
-         print(f"randomize_seed1: new seed = {self.seed1}")
-         return seed
-
-     def randomize_seed2(self):
-         r"""
-         Randomizes the second seed.
-         """
-         seed = np.random.randint(0, 10000000)
-         self.seed2 = int(seed)
-         print(f"randomize_seed2: new seed = {self.seed2}")
-         return seed
-
-     def setup_lb(self, list_ui_vals):
-         r"""
-         Sets all parameters from the UI. Since gradio does not support passing dictionaries,
-         we instead pass keys (list_ui_keys, global) and values (list_ui_vals).
-         """
-         # Collect latent blending variables
-         self.lb.set_width(list_ui_vals[list_ui_keys.index('width')])
-         self.lb.set_height(list_ui_vals[list_ui_keys.index('height')])
-         self.lb.set_prompt1(list_ui_vals[list_ui_keys.index('prompt1')])
-         self.lb.set_prompt2(list_ui_vals[list_ui_keys.index('prompt2')])
-         self.lb.set_negative_prompt(list_ui_vals[list_ui_keys.index('negative_prompt')])
-         self.lb.guidance_scale = list_ui_vals[list_ui_keys.index('guidance_scale')]
-         self.lb.guidance_scale_mid_damper = list_ui_vals[list_ui_keys.index('guidance_scale_mid_damper')]
-         self.t_compute_max_allowed = list_ui_vals[list_ui_keys.index('duration_compute')]
-         self.lb.num_inference_steps = list_ui_vals[list_ui_keys.index('num_inference_steps')]
-         self.lb.sdh.num_inference_steps = list_ui_vals[list_ui_keys.index('num_inference_steps')]
-         self.duration_video = list_ui_vals[list_ui_keys.index('duration_video')]
-         self.lb.seed1 = list_ui_vals[list_ui_keys.index('seed1')]
-         self.lb.seed2 = list_ui_vals[list_ui_keys.index('seed2')]
-         self.lb.branch1_crossfeed_power = list_ui_vals[list_ui_keys.index('branch1_crossfeed_power')]
-         self.lb.branch1_crossfeed_range = list_ui_vals[list_ui_keys.index('branch1_crossfeed_range')]
-         self.lb.branch1_crossfeed_decay = list_ui_vals[list_ui_keys.index('branch1_crossfeed_decay')]
-         self.lb.parental_crossfeed_power = list_ui_vals[list_ui_keys.index('parental_crossfeed_power')]
-         self.lb.parental_crossfeed_range = list_ui_vals[list_ui_keys.index('parental_crossfeed_range')]
-         self.lb.parental_crossfeed_power_decay = list_ui_vals[list_ui_keys.index('parental_crossfeed_power_decay')]
-         self.num_inference_steps = list_ui_vals[list_ui_keys.index('num_inference_steps')]
-         self.depth_strength = list_ui_vals[list_ui_keys.index('depth_strength')]
-
-         if len(list_ui_vals[list_ui_keys.index('user_id')]) > 1:
-             self.user_id = list_ui_vals[list_ui_keys.index('user_id')]
-         else:
-             # Generate a new user id
-             self.user_id = ''.join(random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(8))
-             print(f"made new user_id: {self.user_id} at {get_time('second')}")
-
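The key/value convention above can be sketched in isolation; a minimal toy example (the values are hypothetical):

    # list_ui_keys is built once at UI construction time (see __main__ below);
    # gradio then passes the matching values in the same fixed order.
    list_ui_keys = ['width', 'height']
    list_ui_vals = [512, 768]
    width = list_ui_vals[list_ui_keys.index('width')]  # -> 512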
-     def save_latents(self, fp_latents, list_latents):
-         r"""
-         Saves a latent trajectory on disk, in npy format.
-         """
-         list_latents_cpu = [latent.cpu().numpy() for latent in list_latents]
-         np.save(fp_latents, list_latents_cpu)
-
-     def load_latents(self, fp_latents):
-         r"""
-         Loads a latent trajectory from disk and converts it to torch tensors.
-         """
-         list_latents_cpu = np.load(fp_latents)
-         list_latents = [torch.from_numpy(latent).to(self.lb.device) for latent in list_latents_cpu]
-         return list_latents
-
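A round-trip sketch of the (de)serialization above, assuming all latents in a trajectory share one shape so np.save can stack them into a dense array:

    import numpy as np
    import torch
    latents = [torch.zeros(4, 64, 64) for _ in range(3)]
    np.save("/tmp/latents.npy", [latent.cpu().numpy() for latent in latents])
    restored = [torch.from_numpy(arr) for arr in np.load("/tmp/latents.npy")]
    assert torch.equal(latents[0], restored[0])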
-     def compute_img1(self, *args):
-         r"""
-         Computes the first transition image and returns it for display.
-         Sets all other transition images and the last image to empty (as they are obsolete after this operation).
-         """
-         list_ui_vals = args
-         self.setup_lb(list_ui_vals)
-         fp_img1 = os.path.join(self.dp_imgs, f"img1_{self.user_id}")
-         img1 = Image.fromarray(self.lb.compute_latents1(return_image=True))
-         img1.save(fp_img1 + ".jpg")
-         self.save_latents(fp_img1 + ".npy", self.lb.tree_latents[0])
-         self.recycle_img1 = True
-         self.recycle_img2 = False
-         return [fp_img1 + ".jpg", self.fp_img_empty, self.fp_img_empty, self.fp_img_empty, self.fp_img_empty, self.user_id]
-
-     def compute_img2(self, *args):
-         r"""
-         Computes the last transition image and returns it for display.
-         Sets all other transition images to empty (as they are obsolete after this operation).
-         """
-         if not os.path.isfile(os.path.join(self.dp_imgs, f"img1_{self.user_id}.jpg")):  # don't do anything
-             return [self.fp_img_empty, self.fp_img_empty, self.fp_img_empty, self.fp_img_empty, self.user_id]
-         list_ui_vals = args
-         self.setup_lb(list_ui_vals)
-
-         self.lb.tree_latents[0] = self.load_latents(os.path.join(self.dp_imgs, f"img1_{self.user_id}.npy"))
-         fp_img2 = os.path.join(self.dp_imgs, f"img2_{self.user_id}")
-         img2 = Image.fromarray(self.lb.compute_latents2(return_image=True))
-         img2.save(fp_img2 + '.jpg')
-         self.save_latents(fp_img2 + ".npy", self.lb.tree_latents[-1])
-         self.recycle_img2 = True
-         # fixme save seeds. change filenames?
-         return [self.fp_img_empty, self.fp_img_empty, self.fp_img_empty, fp_img2 + ".jpg", self.user_id]
-
-     def compute_transition(self, *args):
-         r"""
-         Computes the transition images and the movie.
-         """
-         list_ui_vals = args
-         self.setup_lb(list_ui_vals)
-         print("STARTING TRANSITION...")
-         fixed_seeds = [self.seed1, self.seed2]
-         # Inject the loaded latents (guards against interference from other users)
-         self.lb.tree_latents[0] = self.load_latents(os.path.join(self.dp_imgs, f"img1_{self.user_id}.npy"))
-         self.lb.tree_latents[-1] = self.load_latents(os.path.join(self.dp_imgs, f"img2_{self.user_id}.npy"))
-         imgs_transition = self.lb.run_transition(
-             recycle_img1=self.recycle_img1,
-             recycle_img2=self.recycle_img2,
-             num_inference_steps=self.num_inference_steps,
-             depth_strength=self.depth_strength,
-             t_compute_max_allowed=self.t_compute_max_allowed,
-             fixed_seeds=fixed_seeds)
-         print(f"Latent Blending pass finished ({get_time('second')}). Resulted in {len(imgs_transition)} images")
-
-         # Subselect three preview images
-         idx_img_prev = np.round(np.linspace(0, len(imgs_transition) - 1, 5)[1:-1]).astype(np.int32)
-
-         list_imgs_preview = []
-         for j in idx_img_prev:
-             list_imgs_preview.append(Image.fromarray(imgs_transition[j]))
-
-         # Save the preview imgs as jpgs on disk so we are not sending uncompressed data around
-         current_timestamp = get_time('second')
-         self.list_fp_imgs_current = []
-         for i in range(len(list_imgs_preview)):
-             fp_img = os.path.join(self.dp_imgs, f"img_preview_{i}_{current_timestamp}.jpg")
-             list_imgs_preview[i].save(fp_img)
-             self.list_fp_imgs_current.append(fp_img)
-
-         # Insert cheap frames for the movie
-         imgs_transition_ext = add_frames_linear_interp(imgs_transition, self.duration_video, self.fps)
-
-         # Save as movie
-         self.fp_movie = self.get_fp_video_last()
-         if os.path.isfile(self.fp_movie):
-             os.remove(self.fp_movie)
-         ms = MovieSaver(self.fp_movie, fps=self.fps)
-         for img in tqdm(imgs_transition_ext):
-             ms.write_frame(img)
-         ms.finalize()
-         print("DONE SAVING MOVIE! SENDING BACK...")
-
-         # Assemble output, updating the preview images and the movie
-         list_return = self.list_fp_imgs_current + [self.fp_movie]
-         return list_return
-
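The preview-index line above picks three evenly spaced interior frames; a quick worked example for, say, 20 transition frames:

    import numpy as np
    idx = np.round(np.linspace(0, 19, 5)[1:-1]).astype(np.int32)
    print(idx)  # [ 5 10 14] -> interior frames only, endpoints excluded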
-     def stack_forward(self, prompt2, seed2):
-         r"""
-         Allows generating multi-segment movies. Sets last image -> first image with all
-         relevant parameters.
-         """
-         # Save preview images, prompts and seeds into a dictionary for stacking
-         if len(self.list_all_segments) == 0:
-             timestamp_session = get_time('second')
-             self.dp_session = os.path.join(self.dp_out, f"session_{timestamp_session}")
-             os.makedirs(self.dp_session)
-
-         idx_segment = len(self.list_all_segments)
-         dp_segment = os.path.join(self.dp_session, f"segment_{str(idx_segment).zfill(3)}")
-
-         self.list_all_segments.append(dp_segment)
-         self.lb.write_imgs_transition(dp_segment)
-
-         fp_movie_last = self.get_fp_video_last()
-         fp_movie_next = self.get_fp_video_next()
-
-         shutil.copyfile(fp_movie_last, fp_movie_next)
-
-         self.lb.tree_latents[0] = self.load_latents(os.path.join(self.dp_imgs, f"img1_{self.user_id}.npy"))
-         self.lb.tree_latents[-1] = self.load_latents(os.path.join(self.dp_imgs, f"img2_{self.user_id}.npy"))
-         self.lb.swap_forward()
-
-         shutil.copyfile(os.path.join(self.dp_imgs, f"img2_{self.user_id}.npy"), os.path.join(self.dp_imgs, f"img1_{self.user_id}.npy"))
-         fp_multi = self.multi_concat()
-         list_out = [fp_multi]
-
-         list_out.append(os.path.join(self.dp_imgs, f"img2_{self.user_id}.jpg"))
-         list_out.extend([self.fp_img_empty] * 4)
-         list_out.append(gr.update(interactive=False, value=prompt2))
-         list_out.append(gr.update(interactive=False, value=seed2))
-         list_out.append("")
-         list_out.append(np.random.randint(0, 10000000))
-         print(f"stack_forward: fp_multi {fp_multi}")
-         return list_out
-
-     def multi_concat(self):
-         r"""
-         Concatenates all stacked segments into one long movie.
-         """
-         list_fp_movies = self.get_fp_video_all()
-         # Concatenate movies and save
-         fp_final = os.path.join(self.dp_session, f"concat_{self.user_id}.mp4")
-         concatenate_movies(fp_final, list_fp_movies)
-         return fp_final
-
-     def get_fp_video_all(self):
-         r"""
-         Collects all stacked movie segments.
-         """
-         list_all = os.listdir(self.dp_movies)
-         str_beg = f"movie_{self.user_id}_"
-         list_user = [fn for fn in list_all if str_beg in fn]
-         list_user.sort()
-         list_user = [os.path.join(self.dp_movies, fn) for fn in list_user]
-         return list_user
-
-     def get_fp_video_next(self):
-         r"""
-         Gets the filepath of the next movie segment.
-         """
-         list_videos = self.get_fp_video_all()
-         idx_next = len(list_videos)
-         fp_video_next = os.path.join(self.dp_movies, f"movie_{self.user_id}_{str(idx_next).zfill(3)}.mp4")
-         return fp_video_next
-
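The zero-padding in the segment names above is what keeps get_fp_video_all's lexicographic sort in numeric order; a quick illustration (the "AB" user id is made up):

    names = [f"movie_AB_{str(i).zfill(3)}.mp4" for i in [0, 10, 2, 1]]
    print(sorted(names))
    # ['movie_AB_000.mp4', 'movie_AB_001.mp4', 'movie_AB_002.mp4', 'movie_AB_010.mp4']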
-     def get_fp_video_last(self):
-         r"""
-         Gets the filepath of the most recently saved video.
-         """
-         fp_video_last = os.path.join(self.dp_movies, f"last_{self.user_id}.mp4")
-         return fp_video_last
-
-
- if __name__ == "__main__":
-     fp_ckpt = hf_hub_download(repo_id="stabilityai/stable-diffusion-2-1-base", filename="v2-1_512-ema-pruned.ckpt")
-     # fp_ckpt = hf_hub_download(repo_id="stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.ckpt")
-     bf = BlendingFrontend(StableDiffusionHolder(fp_ckpt))
-     # self = BlendingFrontend(None)
-
-     with gr.Blocks() as demo:
-         with gr.Row():
-             prompt1 = gr.Textbox(label="prompt 1")
-             prompt2 = gr.Textbox(label="prompt 2")
-
-         with gr.Row():
-             duration_compute = gr.Slider(5, 200, bf.t_compute_max_allowed, step=1, label='compute budget', interactive=True)
-             duration_video = gr.Slider(1, 100, bf.duration_video, step=0.1, label='video duration', interactive=True)
-             height = gr.Slider(256, 2048, bf.height, step=128, label='height', interactive=True)
-             width = gr.Slider(256, 2048, bf.width, step=128, label='width', interactive=True)
-
-         with gr.Accordion("Advanced Settings (click to expand)", open=False):
-
-             with gr.Accordion("Diffusion settings", open=True):
-                 with gr.Row():
-                     num_inference_steps = gr.Slider(5, 100, bf.num_inference_steps, step=1, label='num_inference_steps', interactive=True)
-                     guidance_scale = gr.Slider(1, 25, bf.guidance_scale, step=0.1, label='guidance_scale', interactive=True)
-                     negative_prompt = gr.Textbox(label="negative prompt")
-
-             with gr.Accordion("Seed control: adjust seeds for first and last images", open=True):
-                 with gr.Row():
-                     b_newseed1 = gr.Button("randomize seed 1", variant='secondary')
-                     seed1 = gr.Number(bf.seed1, label="seed 1", interactive=True)
-                     seed2 = gr.Number(bf.seed2, label="seed 2", interactive=True)
-                     b_newseed2 = gr.Button("randomize seed 2", variant='secondary')
-
-             with gr.Accordion("Last image crossfeeding", open=True):
-                 with gr.Row():
-                     branch1_crossfeed_power = gr.Slider(0.0, 1.0, bf.branch1_crossfeed_power, step=0.01, label='branch1 crossfeed power', interactive=True)
-                     branch1_crossfeed_range = gr.Slider(0.0, 1.0, bf.branch1_crossfeed_range, step=0.01, label='branch1 crossfeed range', interactive=True)
-                     branch1_crossfeed_decay = gr.Slider(0.0, 1.0, bf.branch1_crossfeed_decay, step=0.01, label='branch1 crossfeed decay', interactive=True)
-
-             with gr.Accordion("Transition settings", open=True):
-                 with gr.Row():
-                     parental_crossfeed_power = gr.Slider(0.0, 1.0, bf.parental_crossfeed_power, step=0.01, label='parental crossfeed power', interactive=True)
-                     parental_crossfeed_range = gr.Slider(0.0, 1.0, bf.parental_crossfeed_range, step=0.01, label='parental crossfeed range', interactive=True)
-                     parental_crossfeed_power_decay = gr.Slider(0.0, 1.0, bf.parental_crossfeed_power_decay, step=0.01, label='parental crossfeed decay', interactive=True)
-                 with gr.Row():
-                     depth_strength = gr.Slider(0.01, 0.99, bf.depth_strength, step=0.01, label='depth_strength', interactive=True)
-                     guidance_scale_mid_damper = gr.Slider(0.01, 2.0, bf.guidance_scale_mid_damper, step=0.01, label='guidance_scale_mid_damper', interactive=True)
-
-         with gr.Row():
-             b_compute1 = gr.Button('compute first image', variant='primary')
-             b_compute_transition = gr.Button('compute transition', variant='primary')
-             b_compute2 = gr.Button('compute last image', variant='primary')
-
-         with gr.Row():
-             img1 = gr.Image(label="1/5")
-             img2 = gr.Image(label="2/5", show_progress=False)
-             img3 = gr.Image(label="3/5", show_progress=False)
-             img4 = gr.Image(label="4/5", show_progress=False)
-             img5 = gr.Image(label="5/5")
-
-         with gr.Row():
-             vid_single = gr.Video(label="current single transition")
-             vid_multi = gr.Video(label="concatenated multi transition")
-
-         with gr.Row():
-             b_stackforward = gr.Button('append last movie segment (left) to multi movie (right)', variant='primary')
-
-         with gr.Row():
-             gr.Markdown(
-                 """
-                 # Parameters
-                 ## Main
-                 - compute budget: set your waiting time for the transition. high values = better quality
-                 - video duration: seconds per segment
-                 - height/width: in pixels
-
-                 ## Diffusion settings
-                 - num_inference_steps: number of diffusion steps
-                 - guidance_scale: latent blending seems to prefer lower values here
-                 - negative prompt: enter a negative prompt here, applied to all images
-
-                 ## Last image crossfeeding
-                 - branch1_crossfeed_power: Controls the level of cross-feeding between the first and last image branch. For preserving structures.
-                 - branch1_crossfeed_range: Sets the duration of active crossfeed during development. High values enforce strong structural similarity.
-                 - branch1_crossfeed_decay: Sets the decay for branch1_crossfeed_power. Lower values make the decay stronger across the range.
-
-                 ## Transition settings
-                 - parental_crossfeed_power: Similar to branch1_crossfeed_power, however applied to the images within the transition.
-                 - parental_crossfeed_range: Similar to branch1_crossfeed_range, however applied to the images within the transition.
-                 - parental_crossfeed_power_decay: Similar to branch1_crossfeed_decay, however applied to the images within the transition.
-                 - depth_strength: Determines when the blending process will begin in terms of diffusion steps. Low values are more inventive but can cause motion.
-                 - guidance_scale_mid_damper: Decreases the guidance scale in the middle of a transition.
-                 """)
-
-         with gr.Row():
-             user_id = gr.Textbox(label="user id", interactive=False)
-
-         # Collect all UI elements in a list to easily pass them as inputs in gradio
-         dict_ui_elem = {}
-         dict_ui_elem["prompt1"] = prompt1
-         dict_ui_elem["negative_prompt"] = negative_prompt
-         dict_ui_elem["prompt2"] = prompt2
-
-         dict_ui_elem["duration_compute"] = duration_compute
-         dict_ui_elem["duration_video"] = duration_video
-         dict_ui_elem["height"] = height
-         dict_ui_elem["width"] = width
-
-         dict_ui_elem["depth_strength"] = depth_strength
-         dict_ui_elem["branch1_crossfeed_power"] = branch1_crossfeed_power
-         dict_ui_elem["branch1_crossfeed_range"] = branch1_crossfeed_range
-         dict_ui_elem["branch1_crossfeed_decay"] = branch1_crossfeed_decay
-
-         dict_ui_elem["num_inference_steps"] = num_inference_steps
-         dict_ui_elem["guidance_scale"] = guidance_scale
-         dict_ui_elem["guidance_scale_mid_damper"] = guidance_scale_mid_damper
-         dict_ui_elem["seed1"] = seed1
-         dict_ui_elem["seed2"] = seed2
-
-         dict_ui_elem["parental_crossfeed_range"] = parental_crossfeed_range
-         dict_ui_elem["parental_crossfeed_power"] = parental_crossfeed_power
-         dict_ui_elem["parental_crossfeed_power_decay"] = parental_crossfeed_power_decay
-         dict_ui_elem["user_id"] = user_id
-
-         # Convert to list, as gradio doesn't seem to accept dicts
-         list_ui_vals = []
-         list_ui_keys = []
-         for k in dict_ui_elem.keys():
-             list_ui_vals.append(dict_ui_elem[k])
-             list_ui_keys.append(k)
-         bf.list_ui_keys = list_ui_keys
-
-         b_newseed1.click(bf.randomize_seed1, outputs=seed1)
-         b_newseed2.click(bf.randomize_seed2, outputs=seed2)
-         b_compute1.click(bf.compute_img1, inputs=list_ui_vals, outputs=[img1, img2, img3, img4, img5, user_id])
-         b_compute2.click(bf.compute_img2, inputs=list_ui_vals, outputs=[img2, img3, img4, img5, user_id])
-         b_compute_transition.click(bf.compute_transition,
-                                    inputs=list_ui_vals,
-                                    outputs=[img2, img3, img4, vid_single])
-
-         b_stackforward.click(bf.stack_forward,
-                              inputs=[prompt2, seed2],
-                              outputs=[vid_multi, img1, img2, img3, img4, img5, prompt1, seed1, prompt2])
-
-     demo.launch(share=bf.share, inbrowser=True, inline=False)
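The wiring pattern above (a fixed inputs list fanned into *args handlers) is standard gradio Blocks usage; a minimal self-contained sketch with toy components (not part of the app):

    import gradio as gr
    with gr.Blocks() as toy:
        inp = gr.Textbox(label="in")
        out = gr.Textbox(label="out")
        btn = gr.Button("go")
        # gradio passes one positional argument per entry in inputs
        btn.click(lambda *vals: vals[0].upper(), inputs=[inp], outputs=[out])
    # toy.launch()  # launching is environment-dependent, so left commented out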