it@M InnovationLab committed
Commit 6248ce2 · 1 Parent(s): 8529105

Remove LP anonymisation

Files changed (2):
  1. app-Copy1.py +76 -0
  2. app.py +5 -24
app-Copy1.py ADDED
@@ -0,0 +1,76 @@
+ import gradio as gr
+ import yolov5
+ import numpy as np
+ from PIL import Image, ImageDraw, ImageFilter
+ from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
+ import torchvision.transforms
+ import torch
+
+ person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
+ person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
+ transform = torchvision.transforms.ToPILImage()
+
+ lp_model = yolov5.load('keremberke/yolov5m-license-plate')
+ lp_model.conf = 0.25  # NMS confidence threshold
+ lp_model.iou = 0.45  # NMS IoU threshold
+ lp_model.agnostic = False  # NMS class-agnostic
+ lp_model.multi_label = False  # NMS multiple labels per box
+ lp_model.max_det = 1000  # maximum number of detections per image
+
+ def detect_person(image: Image):
+     semantic_inputs = person_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
+     semantic_outputs = person_model(**semantic_inputs)
+     predicted_semantic_map = person_processor.post_process_semantic_segmentation(semantic_outputs, target_sizes=[image.size[::-1]])[0]
+     mask = transform(predicted_semantic_map.to(torch.uint8))
+     mask = Image.eval(mask, lambda x: 0 if x == 11 else 255)  # Cityscapes class 11 = person
+     return mask
+
+
+ def detect_license_plate(image: Image):
+     results = lp_model(image, size=image.size[0])
+     predictions = results.pred[0]
+     boxes = predictions[:, :4]
+     mask = Image.new(mode="L", size=image.size, color=255)
+     draw = ImageDraw.Draw(mask)
+     for box in boxes:
+         draw.rectangle(list(box), fill=0)
+     return mask
+
+
+ def detect_dummy(image: Image):
+     return Image.new(mode="L", size=image.size, color=255)
+
+
+ implemented_detectors = {
+     "Person": detect_person,
+     "License Plate": detect_license_plate
+ }
+
+
+ def anonymize(path: str, detectors: list):
+     # Read image
+     image = Image.open(path)
+     # Run requested detectors
+     masks = [implemented_detectors.get(det, detect_dummy)(image) for det in detectors]
+     # Combine masks
+     combined = np.minimum.reduce([np.array(m) for m in masks])
+     mask = Image.fromarray(combined)
+     # Apply blur through mask
+     blurred = image.filter(ImageFilter.GaussianBlur(15))
+     anonymized = Image.composite(image, blurred, mask)
+     return anonymized
+
+
+ def test_gradio(image):
+     masks = [detect_person(image), detect_license_plate(image)]
+     combined = np.minimum.reduce([np.array(m) for m in masks])
+     mask = Image.fromarray(combined)
+     # Apply blur through mask
+     blurred = image.filter(ImageFilter.GaussianBlur(15))
+     anonymized = Image.composite(image, blurred, mask)
+     return anonymized
+
+
+ demo = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"))
+ demo.launch(share=True)
+ #demo.launch(server_name="localhost", server_port=8080)
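For reference, a minimal, self-contained sketch of the masking scheme app-Copy1.py relies on: every detector returns an "L"-mode PIL mask that is black (0) over regions to anonymize and white (255) elsewhere, np.minimum.reduce intersects the masks so a pixel stays hidden if any detector flags it, and Image.composite takes the original image where the mask is 255 and the blurred copy where it is 0. The solid-color image and box coordinates below are placeholders, not values from this repo:

import numpy as np
from PIL import Image, ImageDraw, ImageFilter

# Stand-in input image; in the app this arrives from Gradio as a PIL image.
image = Image.new(mode="RGB", size=(320, 240), color=(128, 160, 190))

# Two toy masks standing in for detector output: 0 = hide, 255 = keep.
person_mask = Image.new(mode="L", size=image.size, color=255)
ImageDraw.Draw(person_mask).rectangle([40, 40, 140, 200], fill=0)
plate_mask = Image.new(mode="L", size=image.size, color=255)
ImageDraw.Draw(plate_mask).rectangle([180, 150, 300, 190], fill=0)

# Pixel-wise minimum: black in any mask wins, so hidden regions accumulate.
combined = Image.fromarray(np.minimum.reduce([np.array(person_mask), np.array(plate_mask)]))

# Keep the original where the mask is white; blur shows through where it is black.
blurred = image.filter(ImageFilter.GaussianBlur(15))
anonymized = Image.composite(image, blurred, combined)
anonymized.save("anonymized.png")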
app.py CHANGED
@@ -1,5 +1,4 @@
  import gradio as gr
- import yolov5
  import numpy as np
  from PIL import Image, ImageDraw, ImageFilter
  from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
@@ -10,13 +9,6 @@ person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
  person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
  transform = torchvision.transforms.ToPILImage()
 
- lp_model = yolov5.load('keremberke/yolov5m-license-plate')
- lp_model.conf = 0.25  # NMS confidence threshold
- lp_model.iou = 0.45  # NMS IoU threshold
- lp_model.agnostic = False  # NMS class-agnostic
- lp_model.multi_label = False  # NMS multiple labels per box
- lp_model.max_det = 1000  # maximum number of detections per image
-
  def detect_person(image: Image):
      semantic_inputs = person_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
      semantic_outputs = person_model(**semantic_inputs)
@@ -26,18 +18,6 @@ def detect_person(image: Image):
      return mask
 
 
- def detect_license_plate(image: Image):
-     results = lp_model(image, size=image.size[0])
-     predictions = results.pred[0]
-     boxes = predictions[:, :4]
-     from PIL import Image, ImageDraw, ImageFilter
-     mask = Image.new(mode="L", size=image.size, color=255)
-     draw = ImageDraw.Draw(mask)
-     for box in boxes:
-         draw.rectangle(list(box), fill=0)
-     return mask
-
-
  def detect_dummy(image: Image):
      return Image.new(mode="L", size=image.size, color=255)
 
@@ -63,7 +43,7 @@ def anonymize(path: str, detectors: list):
 
 
  def test_gradio(image):
-     masks = [detect_person(image), detect_license_plate(image)]
+     masks = [detect_person(image)]
      combined = np.minimum.reduce([np.array(m) for m in masks])
      mask = Image.fromarray(combined)
      # Apply blur through mask
@@ -71,6 +51,7 @@ def test_gradio(image):
      anonymized = Image.composite(image, blurred, mask)
      return anonymized
 
- demo = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"))
- demo.launch()
- #demo.launch(server_name="localhost", server_port=8080)
+
+ demo = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil", source="webcam"), outputs=gr.Image(type="pil"))
+ demo.launch(share=True)
+ #demo.launch(server_name="localhost", server_port=8080)
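For reference, detect_person keys on the hard-coded class index 11. Under the standard Cityscapes train-id mapping that index is "person", and the checkpoint's config should carry the same table; a quick sanity check that pulls only the config (no weights), assuming shi-labs/oneformer_cityscapes_swin_large ships the usual id2label entry:

from transformers import AutoConfig

# Fetch just the model config; the first run needs network access.
config = AutoConfig.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")

# Expected to print "person" if the checkpoint uses the standard Cityscapes labels.
print(config.id2label[11])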