parokshsaxena committed
Commit 45cc2fd • 1 Parent(s): 9da72e5

moving width and height to variables and setting them to the dimensions given by Shein

Files changed (1)
  1. app.py +15 -12
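
The change itself is mechanical: the hard-coded 768×1024 portrait size becomes a pair of module-level constants, now set to Shein's 4160×6240 source resolution. One detail to flag in the half-resolution preprocessing calls: PIL's Image.resize accepts only integer sizes, so the halved dimensions must use floor division (WIDTH // 2) rather than true division (WIDTH / 2), which yields floats that resize rejects. A minimal standalone sketch of the pattern (Pillow assumed; the input image is a stand-in):

    from PIL import Image

    WIDTH = 4160   # previously 768
    HEIGHT = 6240  # previously 1024

    img = Image.new("RGB", (1080, 1620))          # stand-in for the uploaded photo
    full = img.resize((WIDTH, HEIGHT))            # full-resolution working image
    half = img.resize((WIDTH // 2, HEIGHT // 2))  # pose/parsing input; // keeps the
                                                  # sizes integral (4160 / 2 == 2080.0
                                                  # would raise a TypeError)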
app.py CHANGED
@@ -121,6 +121,9 @@ pipe = TryonPipeline.from_pretrained(
 )
 pipe.unet_encoder = UNet_Encoder
 
+WIDTH = 4160 # 768
+HEIGHT = 6240 # 1024
+
 @spaces.GPU
 def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
     device = "cuda"
@@ -129,7 +132,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
     pipe.to(device)
     pipe.unet_encoder.to(device)
 
-    garm_img= garm_img.convert("RGB").resize((768,1024))
+    garm_img = garm_img.convert("RGB").resize((WIDTH, HEIGHT))
     human_img_orig = dict["background"].convert("RGB")
 
     if is_checked_crop:
@@ -142,25 +145,25 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
         bottom = (height + target_height) / 2
         cropped_img = human_img_orig.crop((left, top, right, bottom))
         crop_size = cropped_img.size
-        human_img = cropped_img.resize((768,1024))
+        human_img = cropped_img.resize((WIDTH, HEIGHT))
     else:
-        human_img = human_img_orig.resize((768,1024))
+        human_img = human_img_orig.resize((WIDTH, HEIGHT))
 
 
     if is_checked:
-        keypoints = openpose_model(human_img.resize((384,512)))
-        model_parse, _ = parsing_model(human_img.resize((384,512)))
+        keypoints = openpose_model(human_img.resize((WIDTH // 2, HEIGHT // 2)))
+        model_parse, _ = parsing_model(human_img.resize((WIDTH // 2, HEIGHT // 2)))
         mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
-        mask = mask.resize((768,1024))
+        mask = mask.resize((WIDTH, HEIGHT))
     else:
-        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
+        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((WIDTH, HEIGHT)))
     # mask = transforms.ToTensor()(mask)
     # mask = mask.unsqueeze(0)
     mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray+1.0)/2.0)
 
 
-    human_img_arg = _apply_exif_orientation(human_img.resize((384,512)))
+    human_img_arg = _apply_exif_orientation(human_img.resize((WIDTH // 2, HEIGHT // 2)))
     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
 
@@ -169,7 +172,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
     # verbosity = getattr(args, "verbosity", None)
     pose_img = args.func(args,human_img_arg)
     pose_img = pose_img[:,:,::-1]
-    pose_img = Image.fromarray(pose_img).resize((768,1024))
+    pose_img = Image.fromarray(pose_img).resize((WIDTH, HEIGHT))
 
     with torch.no_grad():
         # Extract the images
@@ -227,9 +230,9 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
                     cloth = garm_tensor.to(device,torch.float16),
                     mask_image=mask,
                     image=human_img,
-                    height=1024,
-                    width=768,
-                    ip_adapter_image = garm_img.resize((768,1024)),
+                    height=HEIGHT,
+                    width=WIDTH,
+                    ip_adapter_image = garm_img.resize((WIDTH, HEIGHT)),
                     guidance_scale=2.0,
                 )[0]
 
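
One follow-up worth checking (an assumption, not verified against this Space): Stable-Diffusion-style inpainting pipelines require height and width to be multiples of the VAE downsampling factor of 8, and latent memory scales with pixel area, so 4160×6240 is roughly 33× the area of 768×1024 and will be correspondingly heavier on GPU memory and denoising time. A hypothetical sanity check under that assumption:

    def check_pipeline_dims(width: int, height: int, vae_factor: int = 8) -> None:
        # Hypothetical helper, not part of app.py: SD-style pipelines denoise
        # latents of shape (height // vae_factor, width // vae_factor), so both
        # dimensions must divide evenly by the factor.
        for name, value in (("width", width), ("height", height)):
            if value % vae_factor:
                raise ValueError(f"{name}={value} is not a multiple of {vae_factor}")

    check_pipeline_dims(4160, 6240)  # passes: latents would be 520 x 780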