Jacob Logas committed on
Commit 13bbd52 · 1 Parent(s): 23f9960
Files changed (2)
  1. app.py +19 -13
  2. util/attack_utils.py +6 -4
app.py CHANGED
@@ -11,7 +11,6 @@ import torchvision.transforms as transforms
 import spaces
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print(device)
 to_tensor = transforms.ToTensor()
 
 eps = 0.05
@@ -40,20 +39,27 @@ direction = 1
 crop_size = 112
 scale = crop_size / 112.0
 
-models_attack, V_reduction, dim = prepare_models(
-    model_backbones,
-    input_size,
-    model_roots,
-    kernel_size_gf,
-    sigma_gf,
-    combination,
-    using_subspace,
-    V_reduction_root,
-)
+for root in model_roots:
+    torch.hub.load_state_dict_from_url(root, map_location=device, progress=True)
 
 
 @spaces.GPU
-def protect(img):
+def execute(attack, tensor_img, dir_vec):
+    return attack.execute(tensor_img, dir_vec, direction).detach().cpu()
+
+
+def protect(img, progress=gr.Progress(track_tqdm=True)):
+    models_attack, V_reduction, dim = prepare_models(
+        model_backbones,
+        input_size,
+        model_roots,
+        kernel_size_gf,
+        sigma_gf,
+        combination,
+        using_subspace,
+        V_reduction_root,
+    )
+
     img = Image.fromarray(img)
     reference = get_reference_facial_points(default_square=True) * scale
     h, w, c = np.array(img).shape
@@ -104,7 +110,7 @@ def protect(img):
         theta_warp=theta,
         V_reduction=V_reduction,
     )
-    img_attacked = attack.execute(tensor_img, dir_vec, direction).detach().cpu()
+    img_attacked = execute(attack, tensor_img, dir_vec)
 
     img_attacked_pil = transforms.ToPILImage()(img_attacked[0])
     return img_attacked_pil
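For reference, the general ZeroGPU pattern the new app.py follows is sketched below: fetch weights on the CPU at import time and touch CUDA only inside a function decorated with @spaces.GPU, which Gradio invokes per request. The URL and the function name run are illustrative placeholders, not this Space's actual code.

import gradio as gr
import spaces
import torch

# Download weights at import time; map_location="cpu" keeps this off the GPU.
# The URL below is a placeholder for illustration only.
state_dict = torch.hub.load_state_dict_from_url(
    "https://example.com/backbone.pth", map_location="cpu", progress=True
)

@spaces.GPU
def run(img, progress=gr.Progress(track_tqdm=True)):
    # CUDA is only attached while this decorated call is running.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # ...build the model from state_dict, move it to device, and run it here.
    return img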
util/attack_utils.py CHANGED
@@ -6,8 +6,8 @@ from torch.autograd import Variable
 from util.feature_extraction_utils import warp_image, normalize_batch
 from util.prepare_utils import get_ensemble, extract_features
 from lpips_pytorch import LPIPS
+from tqdm import trange
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 tensor_transform = transforms.ToTensor()
 pil_transform = transforms.ToPILImage()
 
@@ -51,9 +51,11 @@ class Attack(nn.Module):
         self.warp = warp
         self.theta_warp = theta_warp
         if self.attack_type == "lpips":
-            self.lpips_loss = LPIPS(self.net_type).to(device)
+            self.lpips_loss = LPIPS(self.net_type)
 
     def execute(self, images, dir_vec, direction):
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.lpips_loss.to(device)
         images = Variable(images).to(device)
         dir_vec = dir_vec.to(device)
         # take norm wrt dim
@@ -76,7 +78,7 @@ class Attack(nn.Module):
             images.detach().clone() + noise_uniform, requires_grad=True
         ).to(device)
 
-        for i in range(self.n_iters):
+        for i in trange(self.n_iters):
            adv_features = extract_features(
                adv_images, self.extractor_ens, self.dim
            ).to(device)
@@ -115,10 +117,10 @@ class Attack(nn.Module):
        else:
            adv_images[dist > dist_old] = adv_images_old[dist > dist_old]
            dist[dist > dist_old] = dist_old[dist > dist_old]
-
        return adv_images.detach().cpu()
 
    def lpips_reg(self, images, adv_images):
+       device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.warp:
            face_adv = warp_image(adv_images, self.theta_warp)
            lpips_out = self.lpips_loss(
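The util/attack_utils.py change follows the same idea: keep module construction device-agnostic and resolve the device inside the method that actually runs on the GPU worker. A minimal sketch of that pattern, using an illustrative class that is not part of this repo:

import torch
import torch.nn as nn

class Runner(nn.Module):
    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 1)  # constructed on CPU at build time

    def run(self, x):
        # Resolve the device at call time so construction also works on a CPU-only host.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.head.to(device)
        return self.head(x.to(device)).detach().cpu()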