kasper-boy committed on
Commit
6a276e8
1 Parent(s): 02c083a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -0
app.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import scipy.io.wavfile as wavfile


# Use a pipeline as a high-level helper
from transformers import pipeline

# Text-to-speech model: turns the generated description into a spoken waveform.
narrator = pipeline("text-to-speech",
                    model="kakao-enterprise/vits-ljs")

# Object-detection model: DETR with a ResNet-50 backbone.
object_detector = pipeline("object-detection",
                           model="facebook/detr-resnet-50")
14
+
15
+
16
# Define the function to generate audio from text
def generate_audio(text):
    """Narrate *text* with the VITS TTS model and save it as a WAV file.

    :param text: Sentence to synthesize.
    :return: Path to the generated WAV file.
    """
    # Function-scope import so the module-level import block is untouched.
    import tempfile

    # The TTS pipeline returns a dict with an "audio" array of shape
    # (1, n_samples) plus its integer "sampling_rate".
    speech = narrator(text)

    # Fix: the original always wrote a fixed "output.wav" in the CWD, so
    # concurrent Gradio requests clobbered each other's audio. A unique
    # temp file avoids the race; callers only use the returned path.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        audio_path = tmp.name
    wavfile.write(audio_path, rate=speech["sampling_rate"],
                  data=speech["audio"][0])

    # Return the path to the saved audio file
    return audio_path
27
+
28
# read_objects turns a list of detection dicts (each with at least a 'label'
# key, e.g. [{'score': 0.99, 'label': 'person', 'box': {...}},
#            {'score': 0.99, 'label': 'dog', 'box': {...}}])
# into a sentence such as "This picture contains 1 person and 1 dog."
# Items are comma-separated, with "and" only before the final item.
30
+
31
+
32
def read_objects(detection_objects):
    """Build a natural-language sentence from object-detection results.

    :param detection_objects: list of dicts, each with at least a 'label'
        key (the format produced by the HF object-detection pipeline).
    :return: e.g. "This picture contains 1 person and 2 dogs."
    """
    from collections import Counter

    # Count the occurrences of each label, preserving first-seen order.
    object_counts = Counter(det['label'] for det in detection_objects)

    # Robustness fix: an image with no detections used to produce the
    # broken sentence "This picture contains." — give it a real sentence.
    if not object_counts:
        return "This picture contains no detectable objects."

    # Naive pluralisation: append "s" when the count is greater than one.
    parts = [f"{count} {label}{'s' if count > 1 else ''}"
             for label, count in object_counts.items()]

    # Commas between items, with "and" only before the final item.
    if len(parts) == 1:
        listing = parts[0]
    else:
        listing = ", ".join(parts[:-1]) + " and " + parts[-1]

    return f"This picture contains {listing}."
59
+
60
+
61
+
62
def draw_bounding_boxes(image, detections, font_path=None, font_size=20):
    """
    Return a copy of *image* with a red rectangle and a "label score"
    caption drawn for every detection.

    :param image: PIL.Image object to annotate (the original is not modified).
    :param detections: list of detection dicts with 'score', 'label' and
        'box' keys; 'box' maps 'xmin'/'ymin'/'xmax'/'ymax' to pixel coords.
    :param font_path: optional path to a TrueType font file for the captions.
    :param font_size: point size used only when *font_path* is given.
    :return: annotated PIL.Image copy.
    """
    # Work on a copy so the caller's image stays untouched.
    annotated = image.copy()
    canvas = ImageDraw.Draw(annotated)

    # A TTF font honours font_size; PIL's bundled default font is fixed-size
    # (supply a TTF path if larger captions are needed).
    if font_path:
        font = ImageFont.truetype(font_path, font_size)
    else:
        font = ImageFont.load_default()

    for det in detections:
        box = det['box']
        top_left = (box['xmin'], box['ymin'])
        bottom_right = (box['xmax'], box['ymax'])

        # Bounding-box outline.
        canvas.rectangle([top_left, bottom_right], outline="red", width=3)

        # Caption text: "<label> <score>", drawn over a filled background
        # rectangle so it stays readable on any image content.
        caption = f"{det['label']} {det['score']:.2f}"
        if font_path:  # measure with the custom font
            bbox = canvas.textbbox(top_left, caption, font=font)
        else:  # measure with the default font
            bbox = canvas.textbbox(top_left, caption)

        canvas.rectangle([(bbox[0], bbox[1]), (bbox[2], bbox[3])], fill="red")
        canvas.text(top_left, caption, fill="white", font=font)

    return annotated
111
+
112
+
113
def detect_object(image):
    """Full pipeline: detect objects in *image*, draw their boxes,
    describe them in a sentence, and narrate that sentence.

    :param image: PIL.Image uploaded by the user.
    :return: (annotated PIL.Image, path to the narration WAV file).
    """
    detections = object_detector(image)
    annotated_image = draw_bounding_boxes(image, detections)
    description = read_objects(detections)
    audio_path = generate_audio(description)
    return annotated_image, audio_path
120
+
121
+
122
# Gradio UI: image in -> annotated image + narrated description out.
demo = gr.Interface(
    fn=detect_object,
    inputs=[gr.Image(label="Select Image", type="pil")],
    outputs=[
        gr.Image(label="Processed Image", type="pil"),
        gr.Audio(label="Generated Audio"),
    ],
    title="AI-Powered Object Detection with Audio Feedback",
    description=(
        "Upload an image and get object detection results using the "
        "DETR model with a ResNet-50 backbone with Audio Feedback"
    ),
)
demo.launch()
128
+
129
+