Update app.py
app.py CHANGED

@@ -90,7 +90,7 @@ class ModelWrapper:
         else:
             raise NotImplementedError()
 
-
+        noise = noise.to(torch.float16)
         print(f'noise: {noise.dtype}')
         #prompt_embed = prompt_embed.to(torch.float32)
         DTYPE = prompt_embed.dtype
@@ -100,7 +100,7 @@ class ModelWrapper:
         current_timesteps = torch.ones(len(prompt_embed), device="cuda", dtype=torch.long) * constant
         #current_timesteps = current_timesteps.to(torch.float32)
         print(f'current_timestpes: {current_timesteps.dtype}')
-        eval_images = self.model(noise, current_timesteps, prompt_embed, added_cond_kwargs=unet_added_conditions)
+        eval_images = self.model(noise, current_timesteps, prompt_embed, added_cond_kwargs=unet_added_conditions)
         print(eval_images.dtype)
         eval_images = get_x0_from_noise(noise, eval_images, alphas_cumprod, current_timesteps).to(self.DTYPE)
         print(eval_images.dtype)
@@ -140,7 +140,7 @@ class ModelWrapper:
         )
 
         unet_added_conditions = {
-            "time_ids": add_time_ids
+            "time_ids": add_time_ids,
            "text_embeds": batch_pooled_prompt_embeds.squeeze(1)
         }
 
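The two functional changes here are casting `noise` to float16 before the UNet call and adding the missing comma between `"time_ids"` and `"text_embeds"` in `unet_added_conditions`. The sketch below is not from this repository: `match_model_dtype` and the `nn.Linear` stand-in are hypothetical, and it only illustrates the more general pattern of reading the dtype off the model rather than hard-coding `torch.float16`, under the assumption that the real UNet is loaded in half precision.

```python
import torch
import torch.nn as nn

def match_model_dtype(x: torch.Tensor, model: nn.Module) -> torch.Tensor:
    """Cast an input tensor to the parameter dtype of `model`,
    avoiding an fp16/fp32 mismatch in the forward pass."""
    model_dtype = next(model.parameters()).dtype
    return x.to(model_dtype)

# Toy stand-in for the fp16 UNet; any module works for the dtype check.
unet = nn.Linear(4, 4).half()
noise = torch.randn(2, 4)              # fp32 by default

noise = match_model_dtype(noise, unet)
print(noise.dtype)                     # torch.float16

# SDXL-style added conditions, mirroring the dict the commit fixes
# (note the comma after "time_ids"); the names are the diff's own:
# unet_added_conditions = {
#     "time_ids": add_time_ids,
#     "text_embeds": batch_pooled_prompt_embeds.squeeze(1),
# }
```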