# ITACA_Insurance_Core_v4 / Object_Detector.py
# (Hugging Face page chrome — uploader, commit hash, "raw/history/blame", file
# size — removed so the file parses as Python.)
import os
import tensorflow as tf
import tensorflow_hub as hub
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
import matplotlib.pyplot as plt
import matplotlib as mpl
# For drawing onto the image.
import numpy as np
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
import time
import streamlit as st
# For measuring the inference time.
import time
class ObjectDetector:
    """Detect objects in images with a TF-Hub model and draw labeled boxes.

    Wraps the Open Images v4 SSD/MobileNetV2 detector from TF-Hub. The main
    entry point is :meth:`run_detector`, which returns the input image with
    bounding boxes drawn on it plus a short summary string for the top
    detection.
    """

    def __init__(self):
        # Slower but more accurate alternative, kept for reference:
        # module_handle = "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1"
        module_handle = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
        # The 'default' signature maps a float image batch to a dict with
        # detection_boxes / detection_class_entities / detection_scores.
        self.detector = hub.load(module_handle).signatures['default']

    def run_detector(self, path):
        """Run object detection on a single image.

        Args:
            path: the image data as an array-like of shape (H, W, 3) —
                despite the name it is pixel data, not a filesystem path
                (callers pass the loaded image directly).

        Returns:
            Tuple ``(image_with_boxes, primer)``: the image array with boxes
            drawn on it, and a "<top class> <score>%" summary string for the
            highest-scoring detection (or "No detections").
        """
        img = path
        # The model expects float32 pixels in [0, 1] with a batch dimension.
        converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
        result = self.detector(converted_img)
        result = {key: value.numpy() for key, value in result.items()}

        # Guard the empty-result case instead of raising IndexError; decode
        # the entity bytes so the summary reads "Car 87%" (the original
        # str()-formatted the bytes object, producing "b'Car' 87%").
        if len(result["detection_scores"]) > 0:
            primer = '{} {}%'.format(
                result["detection_class_entities"][0].decode("ascii"),
                round(result["detection_scores"][0] * 100))
        else:
            primer = 'No detections'

        image_with_boxes = self.draw_boxes(
            img, result["detection_boxes"],
            result["detection_class_entities"], result["detection_scores"])
        return image_with_boxes, primer

    def display_image(self, image):
        """Show *image* in a large matplotlib figure (debug/notebook use)."""
        fig = plt.figure(figsize=(20, 15))
        plt.grid(False)
        plt.imshow(image)

    @staticmethod
    def _text_size(font, text):
        """Return ``(width, height)`` of *text* rendered with *font*.

        ``ImageFont.getsize`` was removed in Pillow 10; prefer ``getbbox``
        and fall back to ``getsize`` on older Pillow versions.
        """
        try:
            left, top, right, bottom = font.getbbox(text)
            return right - left, bottom - top
        except AttributeError:
            return font.getsize(text)  # Pillow < 10

    def draw_bounding_box_on_image(self, image,
                                   ymin,
                                   xmin,
                                   ymax,
                                   xmax,
                                   color,
                                   font,
                                   thickness=4,
                                   display_str_list=()):
        """Add one bounding box plus label strings to a PIL image, in place.

        Args:
            image: a ``PIL.Image`` to draw on (mutated).
            ymin, xmin, ymax, xmax: box corners normalized to [0, 1].
            color: outline/label-background fill accepted by PIL.
            font: a ``PIL.ImageFont`` instance for the labels.
            thickness: box outline width in pixels.
            display_str_list: label strings stacked at the box's top edge.
        """
        draw = ImageDraw.Draw(image)
        im_width, im_height = image.size
        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                      ymin * im_height, ymax * im_height)
        draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
                   (left, top)],
                  width=thickness,
                  fill=color)

        # If the total height of the display strings added to the top of the
        # bounding box exceeds the top of the image, stack the strings below
        # the bounding box instead of above.
        display_str_heights = [self._text_size(font, ds)[1]
                               for ds in display_str_list]
        # Each display_str has a top and bottom margin of 0.05x.
        total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
        if top > total_display_str_height:
            text_bottom = top
        else:
            text_bottom = top + total_display_str_height

        # Reverse list and print from bottom to top.
        for display_str in display_str_list[::-1]:
            text_width, text_height = self._text_size(font, display_str)
            margin = np.ceil(0.05 * text_height)
            draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                            (left + text_width, text_bottom)],
                           fill=color)
            draw.text((left + margin, text_bottom - text_height - margin),
                      display_str,
                      fill="black",
                      font=font)
            text_bottom -= text_height - 2 * margin

    def draw_boxes(self, image, boxes, class_names, scores, max_boxes=10, min_score=0.4):
        """Overlay labeled boxes on an image with formatted scores and label names.

        Args:
            image: image array of shape (H, W, 3); mutated in place.
            boxes: array of normalized [ymin, xmin, ymax, xmax] rows.
            class_names: detected entity names as bytes.
            scores: detection confidences in [0, 1].
            max_boxes: draw at most this many boxes.
            min_score: skip detections below this confidence.

        Returns:
            The same *image* array, with boxes drawn on it.
        """
        colors = list(ImageColor.colormap.values())
        try:
            font = ImageFont.truetype("./Roboto-Light.ttf", 24)
        except IOError:
            print("Font not found, using default font.")
            font = ImageFont.load_default()

        # Convert to PIL once, draw every qualifying box, then copy back
        # once — the original re-converted and copied per box.
        image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
        drew_any = False
        for i in range(min(boxes.shape[0], max_boxes)):
            if scores[i] >= min_score:
                ymin, xmin, ymax, xmax = tuple(boxes[i])
                display_str = "{}: {}%".format(class_names[i].decode("ascii"),
                                               int(100 * scores[i]))
                # NOTE(review): hash() on bytes is salted per process in
                # Python 3, so colors vary between runs; zlib.crc32 would
                # give stable per-class colors — confirm before changing.
                color = colors[hash(class_names[i]) % len(colors)]
                self.draw_bounding_box_on_image(
                    image_pil,
                    ymin,
                    xmin,
                    ymax,
                    xmax,
                    color,
                    font,
                    display_str_list=[display_str])
                drew_any = True
        if drew_any:
            np.copyto(image, np.array(image_pil))
        return image