LiruiZhao committed on
Commit
0c992a8
1 Parent(s): db29364
Files changed (2) hide show
  1. app.py +2 -13
  2. test/road_sign.png +0 -0
app.py CHANGED
@@ -81,12 +81,7 @@ class CompVisDenoiser(K.external.CompVisDenoiser):
81
  return self.inner_model.apply_model(*args, **kwargs)
82
 
83
  def forward(self, input_0, input_1, sigma, **kwargs):
84
- print("input_0.device:", input_0.device)
85
- print("input_1.device:", input_1.device)
86
  c_out, c_in = [append_dims(x, input_0.ndim) for x in self.get_scalings(sigma)]
87
- print("c_in.device:", c_in.device)
88
- print("c_out.device:", c_out.device)
89
- print("sigma.device:", sigma.device)
90
  # eps_0, eps_1 = self.get_eps(input_0 * c_in, input_1 * c_in, self.sigma_to_t(sigma), **kwargs)
91
  eps_0, eps_1 = self.get_eps(input_0 * c_in, self.sigma_to_t(sigma.cpu().float()).cuda(), **kwargs)
92
 
@@ -164,7 +159,7 @@ model_wrap = CompVisDenoiser(model)
164
  model_wrap_cfg = CFGDenoiser(model_wrap)
165
  null_token = model.get_learned_conditioning([""])
166
 
167
- @spaces.GPU(duration=20)
168
  def generate(
169
  input_image: Image.Image,
170
  instruction: str,
@@ -205,12 +200,8 @@ def generate(
205
  uncond["c_crossattn"] = [null_token.to(model.device)]
206
  uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])]
207
 
208
- print("cond['c_crossattn'][0].device:", cond["c_crossattn"][0].device)
209
- print("cond['c_concat'][0].device:", cond["c_concat"][0].device)
210
- print("uncond['c_crossattn'][0].device:", uncond["c_crossattn"][0].device)
211
- print("uncond['c_concat'][0].device:", uncond["c_concat"][0].device)
212
 
213
- sigmas = model_wrap.get_sigmas(steps).to(model.device)
214
 
215
  extra_args = {
216
  "cond": cond,
@@ -221,8 +212,6 @@ def generate(
221
  torch.manual_seed(seed)
222
  z_0 = torch.randn_like(cond["c_concat"][0]).to(model.device) * sigmas[0]
223
  z_1 = torch.randn_like(cond["c_concat"][0]).to(model.device) * sigmas[0]
224
- print("z_0.device:", z_0.device)
225
- print("z_1.device:", z_1.device)
226
 
227
  z_0, z_1, image_list, mask_list = sample_euler_ancestral(model_wrap_cfg, z_0, z_1, sigmas, height, width, extra_args=extra_args)
228
 
 
81
  return self.inner_model.apply_model(*args, **kwargs)
82
 
83
  def forward(self, input_0, input_1, sigma, **kwargs):
 
 
84
  c_out, c_in = [append_dims(x, input_0.ndim) for x in self.get_scalings(sigma)]
 
 
 
85
  # eps_0, eps_1 = self.get_eps(input_0 * c_in, input_1 * c_in, self.sigma_to_t(sigma), **kwargs)
86
  eps_0, eps_1 = self.get_eps(input_0 * c_in, self.sigma_to_t(sigma.cpu().float()).cuda(), **kwargs)
87
 
 
159
  model_wrap_cfg = CFGDenoiser(model_wrap)
160
  null_token = model.get_learned_conditioning([""])
161
 
162
+ @spaces.GPU(duration=200)
163
  def generate(
164
  input_image: Image.Image,
165
  instruction: str,
 
200
  uncond["c_crossattn"] = [null_token.to(model.device)]
201
  uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])]
202
 
 
 
 
 
203
 
204
+ sigmas = model_wrap.get_sigmas(steps)
205
 
206
  extra_args = {
207
  "cond": cond,
 
212
  torch.manual_seed(seed)
213
  z_0 = torch.randn_like(cond["c_concat"][0]).to(model.device) * sigmas[0]
214
  z_1 = torch.randn_like(cond["c_concat"][0]).to(model.device) * sigmas[0]
 
 
215
 
216
  z_0, z_1, image_list, mask_list = sample_euler_ancestral(model_wrap_cfg, z_0, z_1, sigmas, height, width, extra_args=extra_args)
217
 
test/road_sign.png CHANGED