ozyman committed
Commit 10da50f · 1 Parent(s): 8577ade

fixed color palette

Files changed (1)
  1. app.py +15 -15
app.py CHANGED
@@ -30,14 +30,16 @@ import os
 os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
 os.environ['OMP_NUM_THREADS'] = '4'

-app_version = 'dsdg_vid_2'
+app_version = 'dsdg_vid_3'

 device = torch.device("cpu")
 labels = ['Live', 'Spoof']
 PIX_THRESHOLD = 0.45
-DSDG_THRESHOLD = 50.0
+DSDG_THRESHOLD = 80.0
 DSDG_FACTOR = 1000000
+DSDG_PERCENTILE = 40
 MIN_FACE_WIDTH_THRESHOLD = 210
+
 examples = [
     ['examples/1_1_21_2_33_scene_fake.jpg'],
     ['examples/frame150_real.jpg'],
@@ -50,9 +52,9 @@ tfms = transforms.Compose([
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
 ])
-deepix_model = DeePixBiS(pretrained=False)
-deepix_model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
-deepix_model.eval()
+# deepix_model = DeePixBiS(pretrained=False)
+# deepix_model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
+# deepix_model.eval()


 depth_config_path = 'tddfa/configs/mb1_120x120.yml' # 'tddfa/configs/mb1_120x120.yml
@@ -166,7 +168,7 @@ def prepare_data_dsdg(images, boxes, depths):
         image_x[i, :, :, :] = cv.resize(image, (256, 256))
         # transform to binary mask --> threshold = 0
         depth_x[i, :, :] = cv.resize(depth_img, (32, 32))
-    image_x = image_x[:, :, :, ::-1].transpose((0, 3, 1, 2))
+    image_x = image_x.transpose((0, 3, 1, 2))
     image_x = transform(image_x)
     image_x = torch.from_numpy(image_x.astype(float)).float()
     depth_x = torch.from_numpy(depth_x.astype(float)).float()
@@ -189,15 +191,12 @@ def dsdg_model_inference(imgs, bboxes, depth_imgs):
     for frame_t in range(inputs.shape[1]):
         mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = cdcn_model(inputs[:, frame_t, :, :, :])
         score_norm = torch.sum(mu) / torch.sum(test_maps[:, frame_t, :, :])
-        scores.append(score_norm.item() * DSDG_FACTOR)
+        score = score_norm.item()
+        if score > 10:
+            score = 0.0
+        scores.append(score * DSDG_FACTOR)
         map_score += score_norm
-    map_score = map_score / inputs.shape[1]
-    map_score_list.append(map_score)
-    res_dsdg = map_score_list[0].item()
-    if res_dsdg > 10:
-        res_dsdg = 0.0
-    res_dsdg = res_dsdg * DSDG_FACTOR
-    return res_dsdg, scores
+    return scores


 def inference(img, dsdg_thresh):
@@ -244,7 +243,8 @@ def process_video(vid_path, dsdg_thresh):
     if not inference_images:
         return vid_path, {'Not supported right now': 0}, -1, vid_path, 'Faces too small or not found', -1

-    res_dsdg, scores = dsdg_model_inference(inference_images, inference_bboxes, inference_depths)
+    scores = dsdg_model_inference(inference_images, inference_bboxes, inference_depths)
+    res_dsdg = np.percentile(scores, DSDG_PERCENTILE)
     cls_dsdg = 'Real' if res_dsdg >= dsdg_thresh else 'Spoof'
     for img, bbox, score in zip(inference_images, inference_bboxes, scores):
         x, y, x2, y2 = bbox
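
The prepare_data_dsdg hunk is presumably the "color palette" fix the message refers to: dropping the [:, :, :, ::-1] slice means frames keep their original channel order (no BGR/RGB swap) before the HWC-to-NCHW transpose. A minimal sketch of the difference, using a random made-up batch in place of real frames:

import numpy as np

# Hypothetical batch of two 4x4 frames in channel-last (NHWC) layout,
# standing in for the cv.resize output built up in prepare_data_dsdg.
image_x = np.random.randint(0, 255, size=(2, 4, 4, 3), dtype=np.uint8)

# Old behaviour: reverse the channel axis (BGR <-> RGB), then go to NCHW.
old = image_x[:, :, :, ::-1].transpose((0, 3, 1, 2))

# New behaviour in this commit: keep the channel order, only go to NCHW.
new = image_x.transpose((0, 3, 1, 2))

assert old.shape == new.shape == (2, 3, 4, 4)
assert np.array_equal(old[:, ::-1, :, :], new)  # same pixels, channels reversed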
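
The other change is how a clip-level score is produced: dsdg_model_inference now returns per-frame scores (zeroing any frame whose raw score exceeds 10 as an outlier), and process_video aggregates them with the 40th percentile (DSDG_PERCENTILE) instead of the old averaged map score, compared against the raised DSDG_THRESHOLD of 80. A self-contained sketch of that aggregation, with invented per-frame values:

import numpy as np

DSDG_FACTOR = 1000000
DSDG_PERCENTILE = 40
DSDG_THRESHOLD = 80.0

# Invented per-frame normalized scores, standing in for score_norm.item();
# one frame is an implausibly large outlier.
raw_scores = [0.00004, 0.00012, 0.00009, 37.2, 0.00005]

scores = []
for score in raw_scores:
    if score > 10:  # outlier frames are zeroed rather than skewing the clip score
        score = 0.0
    scores.append(score * DSDG_FACTOR)

# Clip-level score: the 40th percentile of the per-frame scores.
res_dsdg = np.percentile(scores, DSDG_PERCENTILE)
cls_dsdg = 'Real' if res_dsdg >= DSDG_THRESHOLD else 'Spoof'
print(round(res_dsdg, 1), cls_dsdg)  # 46.0 Spoof for these made-up numbers

Roughly speaking, taking a percentile below the median means a clip is labelled Real only when about 60% or more of its frames score at or above the threshold, so a few spuriously high frames cannot flip the decision on their own.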