Update app.py
app.py (CHANGED)
@@ -97,7 +97,7 @@ class ModelWrapper:
 
         for constant in all_timesteps:
             current_timesteps = torch.ones(len(prompt_embed), device="cuda", dtype=torch.long) * constant
-
+            current_timesteps = current_timesteps.to(torch.float16)
             print(f'current_timestpes: {current_timesteps.dtype}')
             eval_images = self.model(noise, current_timesteps, prompt_embed, added_cond_kwargs=unet_added_conditions).sample
             print(type(eval_images))
@@ -237,4 +237,4 @@ def create_demo():
 if __name__ == "__main__":
     demo = create_demo()
     demo.queue(api_open=False)
-    demo.launch(show_error=True
+    demo.launch(show_error=True)
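The substantive change in the first hunk is the cast of the per-sample timestep tensor to float16 before it reaches the UNet call; the second hunk only closes the previously unbalanced parenthesis in demo.launch(show_error=True). Below is a minimal, standalone sketch of the timestep-cast pattern, assuming hypothetical batch_size and constant values in place of the Space's len(prompt_embed) and loop variable, with the device="cuda" argument dropped so it runs anywhere:

import torch

# Hypothetical stand-in values; app.py derives these from prompt_embed and
# its list of sampling timesteps (all_timesteps).
batch_size = 4
constant = 999

# One integer timestep per sample, then cast to float16 (presumably to match
# the half-precision model's expected input dtype, as in the commit).
current_timesteps = torch.ones(batch_size, dtype=torch.long) * constant
current_timesteps = current_timesteps.to(torch.float16)
print(current_timesteps.dtype)  # torch.float16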