narugo1992 committed
Commit d96108e
1 Parent(s): d5f3b25

dev(narugo): add same mode

Files changed (2):
  1. app.py +7 -2
  2. attack.py +10 -2
app.py CHANGED
@@ -48,13 +48,18 @@ if __name__ == '__main__':
                 ' style=\'text-decoration: underline;\'>'
                 ' Github - 7eu7d7/anime-ai-detect-fucker'
                 ' </a>.'
+                ' or'
+                ' <a href="https://colab.research.google.com/drive/1bzoZtE28Y8vfmjuaxXKdh-fr4K0WWSmf?usp=sharing" '
+                ' target="_parent">'
+                ' <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>'
+                ' </a>'
                 '</p>')
         with gr.Row():
             with gr.Column():
                 gr_input_image = gr.Image(type='pil', label='Original Image')
                 with gr.Row():
-                    gr_attack_target = gr.Radio(['auto', 'ai', 'human'], value='auto', label='Attack Target')
-                    gr_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label='Steps')
+                    gr_attack_target = gr.Radio(['auto', 'ai', 'human', 'same'], value='auto', label='Attack Target')
+                    gr_steps = gr.Slider(minimum=1, maximum=100, value=20, step=1, label='Steps')
                 with gr.Row():
                     gr_eps = gr.Slider(label="eps (Noise intensity)", minimum=1, maximum=16, step=1, value=1)
                     gr_noise_step_size = gr.Slider(label="Noise step size", minimum=0.001, maximum=16, step=0.001,
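
On the UI side, this hunk adds an "Open In Colab" badge to the header HTML, a fourth 'same' choice on the Attack Target radio, and raises the Steps slider ceiling from 50 to 100. A minimal standalone sketch of just the two changed controls (the gr.Blocks wrapper and launch call are added here for runnability and are not part of the commit):

import gradio as gr

# Demo-only: the two controls touched by this commit, in isolation.
with gr.Blocks() as demo:
    with gr.Row():
        # 'same' joins the existing attack targets
        gr_attack_target = gr.Radio(['auto', 'ai', 'human', 'same'],
                                    value='auto', label='Attack Target')
        # iteration cap raised from 50 to 100
        gr_steps = gr.Slider(minimum=1, maximum=100, value=20, step=1, label='Steps')

if __name__ == '__main__':
    demo.launch()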
attack.py CHANGED
@@ -20,7 +20,7 @@ def make_args(args_=None):
 
     parser.add_argument('inputs', type=str)
     parser.add_argument('--out_dir', type=str, default='./output')
-    parser.add_argument('--target', type=str, default='auto', help='[auto, ai, human]')
+    parser.add_argument('--target', type=str, default='auto', help='[auto, ai, human, same]')
     parser.add_argument('--eps', type=float, default=8 / 8, help='Noise intensity')
     parser.add_argument('--step_size', type=float, default=1.087313 / 8, help='Attack step size')
     parser.add_argument('--steps', type=int, default=20, help='Attack step count')
@@ -50,6 +50,8 @@ class Attacker:
             self.target = torch.tensor([1]).to(device)
         elif args.target == 'human':
             self.target = torch.tensor([0]).to(device)
+        else:
+            self.target = torch.tensor([0]).to(device)
 
         dataset_mean_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
         dataset_std_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
@@ -59,7 +61,13 @@
         self.pgd = PGD(self.model, img_transform=(
             lambda x: (x - dataset_mean_t) / dataset_std_t, lambda x: x * dataset_std_t + dataset_mean_t))
         self.pgd.set_para(eps=(args.eps * 2) / 255, alpha=lambda: (args.step_size * 2) / 255, iters=args.steps)
-        self.pgd.set_loss(CrossEntropyLoss())
+        if args.target != 'same':
+            self.pgd.set_loss(CrossEntropyLoss())
+        else:
+            def loss_same(a, b):
+                return -torch.exp((a[0, 0] - a[0, 1]) ** 2)
+
+            self.pgd.set_loss(loss_same)
 
     def save_image(self, image, noise, img_name):
         # when scaling the image, only the noise is scaled
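
The substance of the commit is the loss swap in Attacker. loss_same ignores its target argument b, which is why the placeholder self.target assigned in the new else branch never matters, and returns -exp((a[0, 0] - a[0, 1]) ** 2), a quantity that is maximal (at -1) exactly when the detector's two logits coincide. Assuming the PGD loop ascends its loss with signed gradient steps, as PGD conventionally does, 'same' mode therefore nudges the image toward a 50/50 ai-vs-human verdict rather than flipping it. A self-contained sketch of that dynamic on stand-in logits (step size and iteration count here are illustrative, not the repo's defaults):

import torch

def loss_same(a, b):
    # Same loss as the commit: peaks at -1 when the two logits are equal.
    # The target 'b' is unused.
    return -torch.exp((a[0, 0] - a[0, 1]) ** 2)

logits = torch.tensor([[2.0, -1.0]], requires_grad=True)  # stand-in detector output

for _ in range(60):
    loss = loss_same(logits, None)
    loss.backward()
    with torch.no_grad():
        logits += 0.05 * logits.grad.sign()  # signed gradient ascent, PGD-style
    logits.grad.zero_()

print(torch.softmax(logits, dim=1))  # converges to ~[[0.5, 0.5]]

From the CLI the new mode is selected like the existing ones, e.g. python attack.py <inputs> --target same; only the accepted --target values changed, the rest of the interface is untouched.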