Update gradio_app.py
gradio_app.py (+4 -4)
@@ -142,7 +142,7 @@ def get_image(image1, prompt, image2, dim_steps=50, ddim_eta=1., fs=None, seed=1
 
     image1 = torch.from_numpy(image1).permute(2, 0, 1).float().cuda()
     input_h, input_w = image1.shape[1:]
-    image1 = (
+    image1 = (image1 / 255. - 0.5) * 2
 
     image2 = torch.from_numpy(image2).permute(2, 0, 1).float().cuda()
     input_h, input_w = image2.shape[1:]
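The first change completes the normalization line: pixel values are rescaled from [0, 255] to [-1, 1] before being handed to the model. A minimal sketch of the same steps on a dummy frame (the 256x256 size and the CPU-only tensors are placeholders, not values from the app):

```python
import numpy as np
import torch

# Dummy H x W x C uint8 frame standing in for the Gradio input image (placeholder size).
image1 = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

# Same operations as the patched lines, minus .cuda() so the sketch runs anywhere.
image1 = torch.from_numpy(image1).permute(2, 0, 1).float()  # [C, H, W]
image1 = (image1 / 255. - 0.5) * 2                          # [0, 255] -> [-1, 1]

print(image1.min().item(), image1.max().item())  # roughly -1.0 and 1.0
```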
@@ -153,15 +153,15 @@ def get_image(image1, prompt, image2, dim_steps=50, ddim_eta=1., fs=None, seed=1
     image_tensor1 = transform(image1).unsqueeze(1) # [c,1,h,w]
     # image2 = Image.open(file_list[2*idx+1]).convert('RGB')
     image_tensor2 = transform(image2).unsqueeze(1) # [c,1,h,w]
-    frame_tensor1 = repeat(image_tensor1, 'c t h w -> c (repeat t) h w', repeat=
-    frame_tensor2 = repeat(image_tensor2, 'c t h w -> c (repeat t) h w', repeat=
+    frame_tensor1 = repeat(image_tensor1, 'c t h w -> c (repeat t) h w', repeat=8)
+    frame_tensor2 = repeat(image_tensor2, 'c t h w -> c (repeat t) h w', repeat=8)
     videos = torch.cat([frame_tensor1, frame_tensor2], dim=1)
     # frame_tensor = torch.cat([frame_tensor1, frame_tensor1], dim=1)
     # _, filename = os.path.split(file_list[idx*2])
 
     ddim_sampler = DDIMSampler(model) if not multiple_cond_cfg else DDIMSampler_multicond(model)
     batch_size = noise_shape[0]
-    fs = torch.tensor([fs]
+    fs = torch.tensor([fs], dtype=torch.long, device=model.device)
 
     if not text_input:
         prompts = [""]*batch_size
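The remaining changes complete the frame-repetition and frame-stride lines: each conditioning image is repeated to 8 frames along the time axis, the two halves are concatenated into a 16-frame clip, and `fs` becomes a 1-element long tensor on the model's device. A small shape-level sketch with dummy tensors (the channel count, spatial size, example `fs` value, and CPU device are placeholders; the real code keeps everything on `model.device`):

```python
import torch
from einops import repeat

# Dummy [c, 1, h, w] tensors standing in for the two transformed conditioning frames.
image_tensor1 = torch.randn(3, 1, 64, 64)
image_tensor2 = torch.randn(3, 1, 64, 64)

# Each single frame is tiled to 8 frames along the time axis, as in the patch.
frame_tensor1 = repeat(image_tensor1, 'c t h w -> c (repeat t) h w', repeat=8)
frame_tensor2 = repeat(image_tensor2, 'c t h w -> c (repeat t) h w', repeat=8)
print(frame_tensor1.shape)  # torch.Size([3, 8, 64, 64])

# Concatenating along the time axis gives a 16-frame clip:
# the first half comes from image1, the second half from image2.
videos = torch.cat([frame_tensor1, frame_tensor2], dim=1)
print(videos.shape)  # torch.Size([3, 16, 64, 64])

# The frame-stride conditioning is wrapped in a 1-element long tensor (placeholder value here).
fs = 10
fs = torch.tensor([fs], dtype=torch.long)
print(fs)  # tensor([10])
```

Passing `dtype=torch.long` and `device=model.device` when the tensor is built avoids a separate `.to()` call before sampling.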