from picamera.array import PiRGBArray
from picamera import PiCamera
import argparse
import warnings
import datetime
import imutils
import json
import time
import cv2
import os

from tflite_runtime.interpreter import Interpreter
from PIL import Image, ImageOps
import numpy as np

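# Load the trained TFLite classifier once at start-up and allocate its
# tensors, so each detection only pays for a single invoke() call.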
interpreter = Interpreter(model_path="lego_tflite_model/detect.tflite")
interpreter.allocate_tensors()

# Directory where cropped candidate pieces are saved before classification.
path = r'/home/nullspacepi/Desktop/opencv-test/lego-pieces'

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']

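# Convert a PIL image to a numpy array in the requested channel layout
# (a standalone equivalent of Keras's img_to_array helper).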
def img_to_array(img, data_format='channels_last', dtype='float32'):
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: %s' % data_format)

    x = np.asarray(img, dtype=dtype)
    if len(x.shape) == 3:
        if data_format == 'channels_first':
            x = x.transpose(2, 0, 1)
    elif len(x.shape) == 2:
        if data_format == 'channels_first':
            x = x.reshape((1, x.shape[0], x.shape[1]))
        else:
            x = x.reshape((x.shape[0], x.shape[1], 1))
    else:
        raise ValueError('Unsupported image shape: %s' % (x.shape,))
    return x

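# Stretch the 2nd-98th percentile intensity range to the full 0-255 scale,
# boosting contrast before the crop is fed to the classifier.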
def increase_contrast_more(s):
    minval = np.percentile(s, 2)
    maxval = np.percentile(s, 98)
    npImage = np.clip(s, minval, maxval).astype(int)

    lo = np.min(npImage)
    hi = np.max(npImage)

    # Lookup table that linearly maps [lo, hi] onto [0, 255].
    LUT = np.zeros(256, dtype=np.float32)
    LUT[lo:hi + 1] = np.linspace(start=0, stop=255, num=(hi - lo) + 1,
                                 endpoint=True, dtype=np.float32)
    return LUT[npImage]

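# Load the class labels that line up with the model's output indices.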
def load_labels(path):
    with open(path, 'r') as f:
        return [line.strip() for line in f]


labels = load_labels("lego_tflite_model/labelmap.txt")

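# Parse the command-line argument pointing at the JSON configuration file.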
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the JSON configuration file")
args = vars(ap.parse_args())

warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
client = None

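# Initialise the Pi camera and a raw capture buffer at the configured
# resolution and framerate.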
camera = PiCamera()
camera.resolution = tuple(conf["resolution"])
camera.framerate = conf["fps"]
rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))

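# Allow the camera to warm up, then initialise the background model and
# the motion/image counters.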
print("[INFO] warming up...") |
|
time.sleep(conf["camera_warmup_time"]) |
|
avg = None |
|
motionCounter = 0 |
|
image_number = 0 |
|
|
|
|
|
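# Capture frames continuously from the camera's video port.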
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    frame = f.array
    text = "No piece"

    # Resize the frame, then convert to grayscale and blur to suppress noise.
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # The first frame seeds the running-average background model.
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue

cv2.accumulateWeighted(gray, avg, 0.5) |
|
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) |
|
|
|
|
|
|
|
    # Threshold the delta image, dilate to fill holes, then find contours
    # of the remaining foreground regions.
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Examine each contour, skipping any too small to be a piece.
    for c in cnts:
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # Copy the crop before drawing the bounding box, so the green
        # rectangle is not baked into the saved image.
        (x, y, w, h) = cv2.boundingRect(c)
        piece_image = frame[y:y + h, x:x + w].copy()
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Piece found"

if text == "Piece found": |
|
|
|
|
|
|
|
motionCounter += 1 |
|
print("motionCounter= ", motionCounter) |
|
print("image_number= ", image_number) |
|
|
|
|
|
        # Only save and classify after 8 consecutive frames of detected motion.
        if motionCounter >= 8:
            image_number += 1
            image_name = str(image_number) + "image.jpg"
            cv2.imwrite(os.path.join(path, image_name), piece_image)
            motionCounter = 0

            # Reload the saved crop and preprocess it to match the model's
            # input: grayscale, 128x128, contrast-stretched, (1, 128, 128, 1).
            input_image = Image.open(os.path.join(path, image_name))
            input_image = ImageOps.grayscale(input_image)
            input_image = input_image.resize((128, 128))
            input_data = img_to_array(input_image)
            input_data = increase_contrast_more(input_data)
            input_data = input_data.reshape(1, 128, 128, 1)

            # Run inference and map the highest-scoring output index to its label.
            interpreter.set_tensor(input_details[0]['index'], input_data)
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_details[0]['index'])

            category_number = np.argmax(output_data[0])
            classification_label = labels[category_number]
            print("Image Label for " + image_name + " is :", classification_label)

    else:
        # No piece detected in this frame: reset the consecutive-motion counter.
        motionCounter = 0

if conf["show_video"]: |
|
|
|
cv2.imshow("Feed", frame) |
|
key = cv2.waitKey(1) & 0xFF |
|
|
|
if key == ord("q"): |
|
break |
|
|
|
rawCapture.truncate(0) |
|
|