abdulmalek9 committed
Commit 8d1019d
1 Parent(s): 26cb83d
Dockerfile ADDED
@@ -0,0 +1,12 @@
+
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
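The image serves "main:app" with gunicorn on port 7860, the default port expected by Hugging Face Spaces. Note that main.py and requirements.txt are not part of this commit, so the image is assumed to be built from a checkout that already contains them, e.g. with docker build -t emotion-api . and docker run -p 7860:7860 emotion-api (the image name is illustrative). The committed CMD was also missing a comma between the bind address and "main:app", which is fixed above.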
emotion_recognition/__pycache__/ai_model_photo.cpython-311.pyc ADDED
Binary file (5.68 kB)
emotion_recognition/abd13.jpg ADDED
emotion_recognition/ai_model_photo.py ADDED
@@ -0,0 +1,101 @@
+ # Detect emotion from a photo
+ import cv2
+ import os
+ from keras.models import model_from_json
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import threading
+
+
+ def display_image(image_array):
+     cv2.imshow('My Image', image_array)
+     cv2.waitKey(0)
+     cv2.destroyAllWindows()
+
+
+ # os.chdir('models')
+ emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful",
+                 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
+
+ # load json and create model
+ # put your model path here
+ json_file = open(
+     "emotion_recognition/models/emotion_model(MyNet0.82).json", 'r')
+ loaded_model_json = json_file.read()
+ json_file.close()
+ emotion_model = model_from_json(loaded_model_json)
+
+ # load weights into the new model
+ # put your weights path here
+ emotion_model.load_weights(
+     "emotion_recognition/models/emotion_model(MyNet0.82).h5")
+ print("Loaded model from disk")
+
+
+ def detectFace(net, frame, confidence_threshold=0.7):
+     frameOpencvDNN = frame.copy()
+     print(frameOpencvDNN.shape)
+     frameHeight = frameOpencvDNN.shape[0]
+     frameWidth = frameOpencvDNN.shape[1]
+     blob = cv2.dnn.blobFromImage(frameOpencvDNN, 1.0, (227, 227), [
+         124.96, 115.97, 106.13], swapRB=True, crop=False)
+     net.setInput(blob)
+     detections = net.forward()
+     faceBoxes = []
+     for i in range(detections.shape[2]):
+         confidence = detections[0, 0, i, 2]
+         if confidence > confidence_threshold:
+             x1 = int(detections[0, 0, i, 3]*frameWidth)
+             y1 = int(detections[0, 0, i, 4]*frameHeight)
+             x2 = int(detections[0, 0, i, 5]*frameWidth)
+             y2 = int(detections[0, 0, i, 6]*frameHeight)
+             print("x1=", x1, " x2=", x2, " y1=", y1, " y2=", y2)
+             faceBoxes.append([x1, y1, x2, y2])
+             cv2.rectangle(frameOpencvDNN, (x1, y1), (x2, y2),
+                           (0, 255, 0), int(round(frameHeight/150)), 8)
+     return frameOpencvDNN, faceBoxes
+
+
+ faceProto = 'emotion_recognition/models/opencv_face_detector.pbtxt'
+ faceModel = 'emotion_recognition/models/opencv_face_detector_uint8.pb'
+ # Loading the face detection model
+ faceNet = cv2.dnn.readNet(faceModel, faceProto)
+
+ # Get a test image and preprocess it before sending it to the model
+
+
+ def ai(path):
+     f = cv2.imread(path, cv2.IMREAD_COLOR)
+     # cv2.imshow("fla", f)
+     gray_frame = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
+     resultImg, faceBoxes = detectFace(faceNet, f)
+     print('faceBoxes', faceBoxes)
+
+     # Get the coordinates of the face
+     x1, y1, x2, y2 = faceBoxes[0][0], faceBoxes[0][1], faceBoxes[0][2], faceBoxes[0][3]
+     print("x , y , w , h", x1, y1, x2, y2)
+
+     roi_gray_frame = gray_frame[y1-20:y2+10, x1-20:x2+10]
+     cropped_img = np.expand_dims(np.expand_dims(
+         cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
+     img_resized = cv2.resize(resultImg, (0, 0), fx=0.5,
+                              fy=0.5, interpolation=cv2.INTER_AREA)
+     # send the photo to the model
+     emotion_prediction = emotion_model.predict(cropped_img)
+
+     # Get the result
+     maxindex = int(np.argmax(emotion_prediction))
+     cv2.putText(img_resized, emotion_dict[maxindex], (x1+5, y1-20),
+                 cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
+
+     # cv2.imshow("crop", resultImg)
+     # cv2.resizeWindow("crop", 720, 460)
+     # cv2.imshow("crop2", roi_gray_frame)
+     # cv2.resizeWindow("crop2", 720, 460)
+     print("emotion_prediction=", emotion_dict[maxindex])
+
+     display_thread = threading.Thread(
+         target=display_image, args=(img_resized,))
+     display_thread.start()
+
+     return emotion_dict[maxindex]
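A minimal usage sketch for the photo path, assuming the process is started from the repository root (so the relative model paths resolve) and that emotion_recognition is importable as a package; the sample image is the abd13.jpg added in this commit, and the preview window opened by display_image closes on any key press:

    # Hypothetical smoke test; the Keras model is loaded when the module is imported.
    from emotion_recognition.ai_model_photo import ai

    label = ai("emotion_recognition/abd13.jpg")
    print(label)  # one of the seven labels in emotion_dict, e.g. "Happy"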
emotion_recognition/ai_model_real_time.py ADDED
@@ -0,0 +1,94 @@
+ # Detect emotion in real time
+ import cv2
+ import os
+ from keras.models import model_from_json
+ import numpy as np
+ os.chdir('D:/Desktop/CAE/fifth/graduation_project/python/emotion_recognition/models')
+ emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful",
+                 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
+
+ # load json and create model
+ # put your model path here
+ json_file = open(
+     'D:/Desktop/CAE/fifth/graduation_project/python/emotion_recognition/models/emotion_model(MyNet0.82).json', 'r')
+ loaded_model_json = json_file.read()
+ json_file.close()
+ emotion_model = model_from_json(loaded_model_json)
+
+ # load weights into the new model
+ # put your weights path here
+ emotion_model.load_weights(
+     "D:/Desktop/CAE/fifth/graduation_project/python/emotion_recognition/models/emotion_model(MyNet0.82).h5")
+ print("Loaded model from disk")
+
+
+ def detectFace(net, frame, confidence_threshold=0.7):
+     frameOpencvDNN = frame.copy()
+     print(frameOpencvDNN.shape)
+     frameHeight = frameOpencvDNN.shape[0]
+     frameWidth = frameOpencvDNN.shape[1]
+     blob = cv2.dnn.blobFromImage(frameOpencvDNN, 1.0, (227, 227), [
+         124.96, 115.97, 106.13], swapRB=True, crop=False)
+     net.setInput(blob)
+     detections = net.forward()
+     faceBoxes = []
+     for i in range(detections.shape[2]):
+         confidence = detections[0, 0, i, 2]
+         if confidence > confidence_threshold:
+             x1 = int(detections[0, 0, i, 3]*frameWidth)
+             y1 = int(detections[0, 0, i, 4]*frameHeight)
+             x2 = int(detections[0, 0, i, 5]*frameWidth)
+             y2 = int(detections[0, 0, i, 6]*frameHeight)
+             if ((x1 > 60) and (x2 > 50) and (y1 > 40) and (y2 > 40)):
+                 print("x1=", x1, " x2=", x2, " y1=", y1, " y2=", y2)
+                 faceBoxes.append([x1, y1, x2, y2])
+             else:
+                 continue
+             cv2.rectangle(frameOpencvDNN, (x1, y1), (x2, y2),
+                           (0, 255, 0), int(round(frameHeight/150)), 8)
+     return frameOpencvDNN, faceBoxes
+
+
+ faceProto = './opencv_face_detector.pbtxt'
+ faceModel = './opencv_face_detector_uint8.pb'
+ # Loading the face detection model
+ faceNet = cv2.dnn.readNet(faceModel, faceProto)
+
+ # Capture webcam frames and preprocess them before sending them to the model
+
+
+ def ai():
+     cap = cv2.VideoCapture(0)
+
+     while cv2.waitKey(1) < 0:
+         hasFrame, frame = cap.read()
+         if not hasFrame:
+             cv2.waitKey()
+             break
+
+         resultImg, faceBoxes = detectFace(faceNet, frame)
+
+         if not faceBoxes:
+             print("No face detected")
+
+         for (x1, y1, x2, y2) in faceBoxes:
+             gray_frame = cv2.cvtColor(resultImg, cv2.COLOR_BGR2GRAY)
+             print("x , y , w , h", x1, y1, x2, y2)
+             roi_gray_frame = gray_frame[y1-20:y2+10, x1-20:x2+10]
+             cropped_img = np.expand_dims(np.expand_dims(
+                 cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
+             emotion_prediction = emotion_model.predict(cropped_img)
+             maxindex = int(np.argmax(emotion_prediction))
+             cv2.putText(resultImg, emotion_dict[maxindex], (
+                 x1+5, y1-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
+
+         cv2.imshow("Detecting age and Gender", resultImg)
+
+         if cv2.waitKey(33) & 0xFF == ord('q'):
+             break
+
+     cap.release()
+     cv2.destroyAllWindows()
+
+
+ ai()
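ai_model_real_time.py starts its webcam loop as soon as it is imported (ai() is called at module level), so it is meant to be launched directly and quit with the q key. The hard-coded D:/Desktop/... os.chdir path is machine-specific; a sketch of the one-line adjustment needed on another machine (the path shown is an assumption based on the models folder added in this commit):

    # Point the working directory at the folder holding opencv_face_detector.pbtxt/.pb
    os.chdir("emotion_recognition/models")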
emotion_recognition/info.txt ADDED
@@ -0,0 +1,4 @@
+ this is a trial file to add some lines
+ the main purpose of this file is testing our own chatbot
+ this bot isn't for answering general questions
+ here in computer and automation engineering we are learning about large language models
emotion_recognition/models/emotion_model(MyNet0.82).h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82efa51bfa39bf9540312a61e7220664b87ee7f4687d78d6b59376f7153458f8
+ size 9423448
emotion_recognition/models/emotion_model(MyNet0.82).json ADDED
@@ -0,0 +1 @@
+ {"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"module": "keras.layers", "class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}, "registered_name": null}, {"module": "keras.layers", "class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "dtype": "float32", "batch_input_shape": [null, 48, 48, 1], "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 48, 48, 1]}}, {"module": "keras.layers", "class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 46, 46, 32]}}, {"module": "keras.layers", "class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}, "registered_name": null, "build_config": {"input_shape": [null, 44, 44, 64]}}, {"module": "keras.layers", "class_name": "BatchNormalization", "config": {"name": "batch_normalization", "trainable": true, "dtype": "float32", "axis": [3], "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true, "beta_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "gamma_initializer": {"module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null}, "moving_mean_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "moving_variance_initializer": {"module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null}, "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 22, 22, 64]}}, {"module": "keras.layers", "class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}, "registered_name": null, "build_config": {"input_shape": [null, 22, 22, 64]}}, {"module": "keras.layers", "class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", 
"dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 22, 22, 64]}}, {"module": "keras.layers", "class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}, "registered_name": null, "build_config": {"input_shape": [null, 20, 20, 128]}}, {"module": "keras.layers", "class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}, "registered_name": null, "build_config": {"input_shape": [null, 10, 10, 128]}}, {"module": "keras.layers", "class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 10, 10, 128]}}, {"module": "keras.layers", "class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}, "registered_name": null, "build_config": {"input_shape": [null, 8, 8, 128]}}, {"module": "keras.layers", "class_name": "BatchNormalization", "config": {"name": "batch_normalization_1", "trainable": true, "dtype": "float32", "axis": [3], "momentum": 0.99, "epsilon": 0.001, "center": true, "scale": true, "beta_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "gamma_initializer": {"module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null}, "moving_mean_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "moving_variance_initializer": {"module": "keras.initializers", "class_name": "Ones", "config": {}, "registered_name": null}, "beta_regularizer": null, "gamma_regularizer": null, "beta_constraint": null, "gamma_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 4, 4, 128]}}, {"module": "keras.layers", "class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}, "registered_name": null, "build_config": {"input_shape": [null, 4, 4, 128]}}, {"module": "keras.layers", "class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}, "registered_name": null, "build_config": {"input_shape": [null, 4, 
4, 128]}}, {"module": "keras.layers", "class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 2048]}}, {"module": "keras.layers", "class_name": "Dropout", "config": {"name": "dropout_3", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}, "registered_name": null, "build_config": {"input_shape": [null, 1024]}}, {"module": "keras.layers", "class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"module": "keras.initializers", "class_name": "GlorotUniform", "config": {"seed": null}, "registered_name": null}, "bias_initializer": {"module": "keras.initializers", "class_name": "Zeros", "config": {}, "registered_name": null}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "registered_name": null, "build_config": {"input_shape": [null, 1024]}}]}, "keras_version": "2.14.0", "backend": "tensorflow"}
emotion_recognition/models/opencv_face_detector.pbtxt ADDED
@@ -0,0 +1,2362 @@
1
+ node {
2
+ name: "data"
3
+ op: "Placeholder"
4
+ attr {
5
+ key: "dtype"
6
+ value {
7
+ type: DT_FLOAT
8
+ }
9
+ }
10
+ }
11
+ node {
12
+ name: "data_bn/FusedBatchNorm"
13
+ op: "FusedBatchNorm"
14
+ input: "data:0"
15
+ input: "data_bn/gamma"
16
+ input: "data_bn/beta"
17
+ input: "data_bn/mean"
18
+ input: "data_bn/std"
19
+ attr {
20
+ key: "epsilon"
21
+ value {
22
+ f: 1.00099996416e-05
23
+ }
24
+ }
25
+ }
26
+ node {
27
+ name: "data_scale/Mul"
28
+ op: "Mul"
29
+ input: "data_bn/FusedBatchNorm"
30
+ input: "data_scale/mul"
31
+ }
32
+ node {
33
+ name: "data_scale/BiasAdd"
34
+ op: "BiasAdd"
35
+ input: "data_scale/Mul"
36
+ input: "data_scale/add"
37
+ }
38
+ node {
39
+ name: "SpaceToBatchND/block_shape"
40
+ op: "Const"
41
+ attr {
42
+ key: "value"
43
+ value {
44
+ tensor {
45
+ dtype: DT_INT32
46
+ tensor_shape {
47
+ dim {
48
+ size: 2
49
+ }
50
+ }
51
+ int_val: 1
52
+ int_val: 1
53
+ }
54
+ }
55
+ }
56
+ }
57
+ node {
58
+ name: "SpaceToBatchND/paddings"
59
+ op: "Const"
60
+ attr {
61
+ key: "value"
62
+ value {
63
+ tensor {
64
+ dtype: DT_INT32
65
+ tensor_shape {
66
+ dim {
67
+ size: 2
68
+ }
69
+ dim {
70
+ size: 2
71
+ }
72
+ }
73
+ int_val: 3
74
+ int_val: 3
75
+ int_val: 3
76
+ int_val: 3
77
+ }
78
+ }
79
+ }
80
+ }
81
+ node {
82
+ name: "Pad"
83
+ op: "SpaceToBatchND"
84
+ input: "data_scale/BiasAdd"
85
+ input: "SpaceToBatchND/block_shape"
86
+ input: "SpaceToBatchND/paddings"
87
+ }
88
+ node {
89
+ name: "conv1_h/Conv2D"
90
+ op: "Conv2D"
91
+ input: "Pad"
92
+ input: "conv1_h/weights"
93
+ attr {
94
+ key: "dilations"
95
+ value {
96
+ list {
97
+ i: 1
98
+ i: 1
99
+ i: 1
100
+ i: 1
101
+ }
102
+ }
103
+ }
104
+ attr {
105
+ key: "padding"
106
+ value {
107
+ s: "VALID"
108
+ }
109
+ }
110
+ attr {
111
+ key: "strides"
112
+ value {
113
+ list {
114
+ i: 1
115
+ i: 2
116
+ i: 2
117
+ i: 1
118
+ }
119
+ }
120
+ }
121
+ }
122
+ node {
123
+ name: "conv1_h/BiasAdd"
124
+ op: "BiasAdd"
125
+ input: "conv1_h/Conv2D"
126
+ input: "conv1_h/bias"
127
+ }
128
+ node {
129
+ name: "BatchToSpaceND"
130
+ op: "BatchToSpaceND"
131
+ input: "conv1_h/BiasAdd"
132
+ }
133
+ node {
134
+ name: "conv1_bn_h/FusedBatchNorm"
135
+ op: "FusedBatchNorm"
136
+ input: "BatchToSpaceND"
137
+ input: "conv1_bn_h/gamma"
138
+ input: "conv1_bn_h/beta"
139
+ input: "conv1_bn_h/mean"
140
+ input: "conv1_bn_h/std"
141
+ attr {
142
+ key: "epsilon"
143
+ value {
144
+ f: 1.00099996416e-05
145
+ }
146
+ }
147
+ }
148
+ node {
149
+ name: "conv1_scale_h/Mul"
150
+ op: "Mul"
151
+ input: "conv1_bn_h/FusedBatchNorm"
152
+ input: "conv1_scale_h/mul"
153
+ }
154
+ node {
155
+ name: "conv1_scale_h/BiasAdd"
156
+ op: "BiasAdd"
157
+ input: "conv1_scale_h/Mul"
158
+ input: "conv1_scale_h/add"
159
+ }
160
+ node {
161
+ name: "Relu"
162
+ op: "Relu"
163
+ input: "conv1_scale_h/BiasAdd"
164
+ }
165
+ node {
166
+ name: "conv1_pool/MaxPool"
167
+ op: "MaxPool"
168
+ input: "Relu"
169
+ attr {
170
+ key: "ksize"
171
+ value {
172
+ list {
173
+ i: 1
174
+ i: 3
175
+ i: 3
176
+ i: 1
177
+ }
178
+ }
179
+ }
180
+ attr {
181
+ key: "padding"
182
+ value {
183
+ s: "SAME"
184
+ }
185
+ }
186
+ attr {
187
+ key: "strides"
188
+ value {
189
+ list {
190
+ i: 1
191
+ i: 2
192
+ i: 2
193
+ i: 1
194
+ }
195
+ }
196
+ }
197
+ }
198
+ node {
199
+ name: "layer_64_1_conv1_h/Conv2D"
200
+ op: "Conv2D"
201
+ input: "conv1_pool/MaxPool"
202
+ input: "layer_64_1_conv1_h/weights"
203
+ attr {
204
+ key: "dilations"
205
+ value {
206
+ list {
207
+ i: 1
208
+ i: 1
209
+ i: 1
210
+ i: 1
211
+ }
212
+ }
213
+ }
214
+ attr {
215
+ key: "padding"
216
+ value {
217
+ s: "SAME"
218
+ }
219
+ }
220
+ attr {
221
+ key: "strides"
222
+ value {
223
+ list {
224
+ i: 1
225
+ i: 1
226
+ i: 1
227
+ i: 1
228
+ }
229
+ }
230
+ }
231
+ }
232
+ node {
233
+ name: "layer_64_1_bn2_h/FusedBatchNorm"
234
+ op: "BiasAdd"
235
+ input: "layer_64_1_conv1_h/Conv2D"
236
+ input: "layer_64_1_conv1_h/Conv2D_bn_offset"
237
+ }
238
+ node {
239
+ name: "layer_64_1_scale2_h/Mul"
240
+ op: "Mul"
241
+ input: "layer_64_1_bn2_h/FusedBatchNorm"
242
+ input: "layer_64_1_scale2_h/mul"
243
+ }
244
+ node {
245
+ name: "layer_64_1_scale2_h/BiasAdd"
246
+ op: "BiasAdd"
247
+ input: "layer_64_1_scale2_h/Mul"
248
+ input: "layer_64_1_scale2_h/add"
249
+ }
250
+ node {
251
+ name: "Relu_1"
252
+ op: "Relu"
253
+ input: "layer_64_1_scale2_h/BiasAdd"
254
+ }
255
+ node {
256
+ name: "layer_64_1_conv2_h/Conv2D"
257
+ op: "Conv2D"
258
+ input: "Relu_1"
259
+ input: "layer_64_1_conv2_h/weights"
260
+ attr {
261
+ key: "dilations"
262
+ value {
263
+ list {
264
+ i: 1
265
+ i: 1
266
+ i: 1
267
+ i: 1
268
+ }
269
+ }
270
+ }
271
+ attr {
272
+ key: "padding"
273
+ value {
274
+ s: "SAME"
275
+ }
276
+ }
277
+ attr {
278
+ key: "strides"
279
+ value {
280
+ list {
281
+ i: 1
282
+ i: 1
283
+ i: 1
284
+ i: 1
285
+ }
286
+ }
287
+ }
288
+ }
289
+ node {
290
+ name: "add"
291
+ op: "Add"
292
+ input: "layer_64_1_conv2_h/Conv2D"
293
+ input: "conv1_pool/MaxPool"
294
+ }
295
+ node {
296
+ name: "layer_128_1_bn1_h/FusedBatchNorm"
297
+ op: "FusedBatchNorm"
298
+ input: "add"
299
+ input: "layer_128_1_bn1_h/gamma"
300
+ input: "layer_128_1_bn1_h/beta"
301
+ input: "layer_128_1_bn1_h/mean"
302
+ input: "layer_128_1_bn1_h/std"
303
+ attr {
304
+ key: "epsilon"
305
+ value {
306
+ f: 1.00099996416e-05
307
+ }
308
+ }
309
+ }
310
+ node {
311
+ name: "layer_128_1_scale1_h/Mul"
312
+ op: "Mul"
313
+ input: "layer_128_1_bn1_h/FusedBatchNorm"
314
+ input: "layer_128_1_scale1_h/mul"
315
+ }
316
+ node {
317
+ name: "layer_128_1_scale1_h/BiasAdd"
318
+ op: "BiasAdd"
319
+ input: "layer_128_1_scale1_h/Mul"
320
+ input: "layer_128_1_scale1_h/add"
321
+ }
322
+ node {
323
+ name: "Relu_2"
324
+ op: "Relu"
325
+ input: "layer_128_1_scale1_h/BiasAdd"
326
+ }
327
+ node {
328
+ name: "layer_128_1_conv_expand_h/Conv2D"
329
+ op: "Conv2D"
330
+ input: "Relu_2"
331
+ input: "layer_128_1_conv_expand_h/weights"
332
+ attr {
333
+ key: "dilations"
334
+ value {
335
+ list {
336
+ i: 1
337
+ i: 1
338
+ i: 1
339
+ i: 1
340
+ }
341
+ }
342
+ }
343
+ attr {
344
+ key: "padding"
345
+ value {
346
+ s: "SAME"
347
+ }
348
+ }
349
+ attr {
350
+ key: "strides"
351
+ value {
352
+ list {
353
+ i: 1
354
+ i: 2
355
+ i: 2
356
+ i: 1
357
+ }
358
+ }
359
+ }
360
+ }
361
+ node {
362
+ name: "layer_128_1_conv1_h/Conv2D"
363
+ op: "Conv2D"
364
+ input: "Relu_2"
365
+ input: "layer_128_1_conv1_h/weights"
366
+ attr {
367
+ key: "dilations"
368
+ value {
369
+ list {
370
+ i: 1
371
+ i: 1
372
+ i: 1
373
+ i: 1
374
+ }
375
+ }
376
+ }
377
+ attr {
378
+ key: "padding"
379
+ value {
380
+ s: "SAME"
381
+ }
382
+ }
383
+ attr {
384
+ key: "strides"
385
+ value {
386
+ list {
387
+ i: 1
388
+ i: 2
389
+ i: 2
390
+ i: 1
391
+ }
392
+ }
393
+ }
394
+ }
395
+ node {
396
+ name: "layer_128_1_bn2/FusedBatchNorm"
397
+ op: "BiasAdd"
398
+ input: "layer_128_1_conv1_h/Conv2D"
399
+ input: "layer_128_1_conv1_h/Conv2D_bn_offset"
400
+ }
401
+ node {
402
+ name: "layer_128_1_scale2/Mul"
403
+ op: "Mul"
404
+ input: "layer_128_1_bn2/FusedBatchNorm"
405
+ input: "layer_128_1_scale2/mul"
406
+ }
407
+ node {
408
+ name: "layer_128_1_scale2/BiasAdd"
409
+ op: "BiasAdd"
410
+ input: "layer_128_1_scale2/Mul"
411
+ input: "layer_128_1_scale2/add"
412
+ }
413
+ node {
414
+ name: "Relu_3"
415
+ op: "Relu"
416
+ input: "layer_128_1_scale2/BiasAdd"
417
+ }
418
+ node {
419
+ name: "layer_128_1_conv2/Conv2D"
420
+ op: "Conv2D"
421
+ input: "Relu_3"
422
+ input: "layer_128_1_conv2/weights"
423
+ attr {
424
+ key: "dilations"
425
+ value {
426
+ list {
427
+ i: 1
428
+ i: 1
429
+ i: 1
430
+ i: 1
431
+ }
432
+ }
433
+ }
434
+ attr {
435
+ key: "padding"
436
+ value {
437
+ s: "SAME"
438
+ }
439
+ }
440
+ attr {
441
+ key: "strides"
442
+ value {
443
+ list {
444
+ i: 1
445
+ i: 1
446
+ i: 1
447
+ i: 1
448
+ }
449
+ }
450
+ }
451
+ }
452
+ node {
453
+ name: "add_1"
454
+ op: "Add"
455
+ input: "layer_128_1_conv2/Conv2D"
456
+ input: "layer_128_1_conv_expand_h/Conv2D"
457
+ }
458
+ node {
459
+ name: "layer_256_1_bn1/FusedBatchNorm"
460
+ op: "FusedBatchNorm"
461
+ input: "add_1"
462
+ input: "layer_256_1_bn1/gamma"
463
+ input: "layer_256_1_bn1/beta"
464
+ input: "layer_256_1_bn1/mean"
465
+ input: "layer_256_1_bn1/std"
466
+ attr {
467
+ key: "epsilon"
468
+ value {
469
+ f: 1.00099996416e-05
470
+ }
471
+ }
472
+ }
473
+ node {
474
+ name: "layer_256_1_scale1/Mul"
475
+ op: "Mul"
476
+ input: "layer_256_1_bn1/FusedBatchNorm"
477
+ input: "layer_256_1_scale1/mul"
478
+ }
479
+ node {
480
+ name: "layer_256_1_scale1/BiasAdd"
481
+ op: "BiasAdd"
482
+ input: "layer_256_1_scale1/Mul"
483
+ input: "layer_256_1_scale1/add"
484
+ }
485
+ node {
486
+ name: "Relu_4"
487
+ op: "Relu"
488
+ input: "layer_256_1_scale1/BiasAdd"
489
+ }
490
+ node {
491
+ name: "SpaceToBatchND_1/paddings"
492
+ op: "Const"
493
+ attr {
494
+ key: "value"
495
+ value {
496
+ tensor {
497
+ dtype: DT_INT32
498
+ tensor_shape {
499
+ dim {
500
+ size: 2
501
+ }
502
+ dim {
503
+ size: 2
504
+ }
505
+ }
506
+ int_val: 1
507
+ int_val: 1
508
+ int_val: 1
509
+ int_val: 1
510
+ }
511
+ }
512
+ }
513
+ }
514
+ node {
515
+ name: "layer_256_1_conv_expand/Conv2D"
516
+ op: "Conv2D"
517
+ input: "Relu_4"
518
+ input: "layer_256_1_conv_expand/weights"
519
+ attr {
520
+ key: "dilations"
521
+ value {
522
+ list {
523
+ i: 1
524
+ i: 1
525
+ i: 1
526
+ i: 1
527
+ }
528
+ }
529
+ }
530
+ attr {
531
+ key: "padding"
532
+ value {
533
+ s: "SAME"
534
+ }
535
+ }
536
+ attr {
537
+ key: "strides"
538
+ value {
539
+ list {
540
+ i: 1
541
+ i: 2
542
+ i: 2
543
+ i: 1
544
+ }
545
+ }
546
+ }
547
+ }
548
+ node {
549
+ name: "conv4_3_norm/l2_normalize"
550
+ op: "L2Normalize"
551
+ input: "Relu_4:0"
552
+ input: "conv4_3_norm/l2_normalize/Sum/reduction_indices"
553
+ }
554
+ node {
555
+ name: "conv4_3_norm/mul_1"
556
+ op: "Mul"
557
+ input: "conv4_3_norm/l2_normalize"
558
+ input: "conv4_3_norm/mul"
559
+ }
560
+ node {
561
+ name: "conv4_3_norm_mbox_loc/Conv2D"
562
+ op: "Conv2D"
563
+ input: "conv4_3_norm/mul_1"
564
+ input: "conv4_3_norm_mbox_loc/weights"
565
+ attr {
566
+ key: "dilations"
567
+ value {
568
+ list {
569
+ i: 1
570
+ i: 1
571
+ i: 1
572
+ i: 1
573
+ }
574
+ }
575
+ }
576
+ attr {
577
+ key: "padding"
578
+ value {
579
+ s: "SAME"
580
+ }
581
+ }
582
+ attr {
583
+ key: "strides"
584
+ value {
585
+ list {
586
+ i: 1
587
+ i: 1
588
+ i: 1
589
+ i: 1
590
+ }
591
+ }
592
+ }
593
+ }
594
+ node {
595
+ name: "conv4_3_norm_mbox_loc/BiasAdd"
596
+ op: "BiasAdd"
597
+ input: "conv4_3_norm_mbox_loc/Conv2D"
598
+ input: "conv4_3_norm_mbox_loc/bias"
599
+ }
600
+ node {
601
+ name: "flatten/Reshape"
602
+ op: "Flatten"
603
+ input: "conv4_3_norm_mbox_loc/BiasAdd"
604
+ }
605
+ node {
606
+ name: "conv4_3_norm_mbox_conf/Conv2D"
607
+ op: "Conv2D"
608
+ input: "conv4_3_norm/mul_1"
609
+ input: "conv4_3_norm_mbox_conf/weights"
610
+ attr {
611
+ key: "dilations"
612
+ value {
613
+ list {
614
+ i: 1
615
+ i: 1
616
+ i: 1
617
+ i: 1
618
+ }
619
+ }
620
+ }
621
+ attr {
622
+ key: "padding"
623
+ value {
624
+ s: "SAME"
625
+ }
626
+ }
627
+ attr {
628
+ key: "strides"
629
+ value {
630
+ list {
631
+ i: 1
632
+ i: 1
633
+ i: 1
634
+ i: 1
635
+ }
636
+ }
637
+ }
638
+ }
639
+ node {
640
+ name: "conv4_3_norm_mbox_conf/BiasAdd"
641
+ op: "BiasAdd"
642
+ input: "conv4_3_norm_mbox_conf/Conv2D"
643
+ input: "conv4_3_norm_mbox_conf/bias"
644
+ }
645
+ node {
646
+ name: "flatten_6/Reshape"
647
+ op: "Flatten"
648
+ input: "conv4_3_norm_mbox_conf/BiasAdd"
649
+ }
650
+ node {
651
+ name: "Pad_1"
652
+ op: "SpaceToBatchND"
653
+ input: "Relu_4"
654
+ input: "SpaceToBatchND/block_shape"
655
+ input: "SpaceToBatchND_1/paddings"
656
+ }
657
+ node {
658
+ name: "layer_256_1_conv1/Conv2D"
659
+ op: "Conv2D"
660
+ input: "Pad_1"
661
+ input: "layer_256_1_conv1/weights"
662
+ attr {
663
+ key: "dilations"
664
+ value {
665
+ list {
666
+ i: 1
667
+ i: 1
668
+ i: 1
669
+ i: 1
670
+ }
671
+ }
672
+ }
673
+ attr {
674
+ key: "padding"
675
+ value {
676
+ s: "VALID"
677
+ }
678
+ }
679
+ attr {
680
+ key: "strides"
681
+ value {
682
+ list {
683
+ i: 1
684
+ i: 2
685
+ i: 2
686
+ i: 1
687
+ }
688
+ }
689
+ }
690
+ }
691
+ node {
692
+ name: "layer_256_1_bn2/FusedBatchNorm"
693
+ op: "BiasAdd"
694
+ input: "layer_256_1_conv1/Conv2D"
695
+ input: "layer_256_1_conv1/Conv2D_bn_offset"
696
+ }
697
+ node {
698
+ name: "BatchToSpaceND_1"
699
+ op: "BatchToSpaceND"
700
+ input: "layer_256_1_bn2/FusedBatchNorm"
701
+ }
702
+ node {
703
+ name: "layer_256_1_scale2/Mul"
704
+ op: "Mul"
705
+ input: "BatchToSpaceND_1"
706
+ input: "layer_256_1_scale2/mul"
707
+ }
708
+ node {
709
+ name: "layer_256_1_scale2/BiasAdd"
710
+ op: "BiasAdd"
711
+ input: "layer_256_1_scale2/Mul"
712
+ input: "layer_256_1_scale2/add"
713
+ }
714
+ node {
715
+ name: "Relu_5"
716
+ op: "Relu"
717
+ input: "layer_256_1_scale2/BiasAdd"
718
+ }
719
+ node {
720
+ name: "layer_256_1_conv2/Conv2D"
721
+ op: "Conv2D"
722
+ input: "Relu_5"
723
+ input: "layer_256_1_conv2/weights"
724
+ attr {
725
+ key: "dilations"
726
+ value {
727
+ list {
728
+ i: 1
729
+ i: 1
730
+ i: 1
731
+ i: 1
732
+ }
733
+ }
734
+ }
735
+ attr {
736
+ key: "padding"
737
+ value {
738
+ s: "SAME"
739
+ }
740
+ }
741
+ attr {
742
+ key: "strides"
743
+ value {
744
+ list {
745
+ i: 1
746
+ i: 1
747
+ i: 1
748
+ i: 1
749
+ }
750
+ }
751
+ }
752
+ }
753
+ node {
754
+ name: "add_2"
755
+ op: "Add"
756
+ input: "layer_256_1_conv2/Conv2D"
757
+ input: "layer_256_1_conv_expand/Conv2D"
758
+ }
759
+ node {
760
+ name: "layer_512_1_bn1/FusedBatchNorm"
761
+ op: "FusedBatchNorm"
762
+ input: "add_2"
763
+ input: "layer_512_1_bn1/gamma"
764
+ input: "layer_512_1_bn1/beta"
765
+ input: "layer_512_1_bn1/mean"
766
+ input: "layer_512_1_bn1/std"
767
+ attr {
768
+ key: "epsilon"
769
+ value {
770
+ f: 1.00099996416e-05
771
+ }
772
+ }
773
+ }
774
+ node {
775
+ name: "layer_512_1_scale1/Mul"
776
+ op: "Mul"
777
+ input: "layer_512_1_bn1/FusedBatchNorm"
778
+ input: "layer_512_1_scale1/mul"
779
+ }
780
+ node {
781
+ name: "layer_512_1_scale1/BiasAdd"
782
+ op: "BiasAdd"
783
+ input: "layer_512_1_scale1/Mul"
784
+ input: "layer_512_1_scale1/add"
785
+ }
786
+ node {
787
+ name: "Relu_6"
788
+ op: "Relu"
789
+ input: "layer_512_1_scale1/BiasAdd"
790
+ }
791
+ node {
792
+ name: "layer_512_1_conv_expand_h/Conv2D"
793
+ op: "Conv2D"
794
+ input: "Relu_6"
795
+ input: "layer_512_1_conv_expand_h/weights"
796
+ attr {
797
+ key: "dilations"
798
+ value {
799
+ list {
800
+ i: 1
801
+ i: 1
802
+ i: 1
803
+ i: 1
804
+ }
805
+ }
806
+ }
807
+ attr {
808
+ key: "padding"
809
+ value {
810
+ s: "SAME"
811
+ }
812
+ }
813
+ attr {
814
+ key: "strides"
815
+ value {
816
+ list {
817
+ i: 1
818
+ i: 1
819
+ i: 1
820
+ i: 1
821
+ }
822
+ }
823
+ }
824
+ }
825
+ node {
826
+ name: "layer_512_1_conv1_h/Conv2D"
827
+ op: "Conv2D"
828
+ input: "Relu_6"
829
+ input: "layer_512_1_conv1_h/weights"
830
+ attr {
831
+ key: "dilations"
832
+ value {
833
+ list {
834
+ i: 1
835
+ i: 1
836
+ i: 1
837
+ i: 1
838
+ }
839
+ }
840
+ }
841
+ attr {
842
+ key: "padding"
843
+ value {
844
+ s: "SAME"
845
+ }
846
+ }
847
+ attr {
848
+ key: "strides"
849
+ value {
850
+ list {
851
+ i: 1
852
+ i: 1
853
+ i: 1
854
+ i: 1
855
+ }
856
+ }
857
+ }
858
+ }
859
+ node {
860
+ name: "layer_512_1_bn2_h/FusedBatchNorm"
861
+ op: "BiasAdd"
862
+ input: "layer_512_1_conv1_h/Conv2D"
863
+ input: "layer_512_1_conv1_h/Conv2D_bn_offset"
864
+ }
865
+ node {
866
+ name: "layer_512_1_scale2_h/Mul"
867
+ op: "Mul"
868
+ input: "layer_512_1_bn2_h/FusedBatchNorm"
869
+ input: "layer_512_1_scale2_h/mul"
870
+ }
871
+ node {
872
+ name: "layer_512_1_scale2_h/BiasAdd"
873
+ op: "BiasAdd"
874
+ input: "layer_512_1_scale2_h/Mul"
875
+ input: "layer_512_1_scale2_h/add"
876
+ }
877
+ node {
878
+ name: "Relu_7"
879
+ op: "Relu"
880
+ input: "layer_512_1_scale2_h/BiasAdd"
881
+ }
882
+ node {
883
+ name: "layer_512_1_conv2_h/convolution/SpaceToBatchND"
884
+ op: "SpaceToBatchND"
885
+ input: "Relu_7"
886
+ input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/block_shape"
887
+ input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/paddings"
888
+ }
889
+ node {
890
+ name: "layer_512_1_conv2_h/convolution"
891
+ op: "Conv2D"
892
+ input: "layer_512_1_conv2_h/convolution/SpaceToBatchND"
893
+ input: "layer_512_1_conv2_h/weights"
894
+ attr {
895
+ key: "dilations"
896
+ value {
897
+ list {
898
+ i: 1
899
+ i: 1
900
+ i: 1
901
+ i: 1
902
+ }
903
+ }
904
+ }
905
+ attr {
906
+ key: "padding"
907
+ value {
908
+ s: "VALID"
909
+ }
910
+ }
911
+ attr {
912
+ key: "strides"
913
+ value {
914
+ list {
915
+ i: 1
916
+ i: 1
917
+ i: 1
918
+ i: 1
919
+ }
920
+ }
921
+ }
922
+ }
923
+ node {
924
+ name: "layer_512_1_conv2_h/convolution/BatchToSpaceND"
925
+ op: "BatchToSpaceND"
926
+ input: "layer_512_1_conv2_h/convolution"
927
+ input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/block_shape"
928
+ input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/crops"
929
+ }
930
+ node {
931
+ name: "add_3"
932
+ op: "Add"
933
+ input: "layer_512_1_conv2_h/convolution/BatchToSpaceND"
934
+ input: "layer_512_1_conv_expand_h/Conv2D"
935
+ }
936
+ node {
937
+ name: "last_bn_h/FusedBatchNorm"
938
+ op: "FusedBatchNorm"
939
+ input: "add_3"
940
+ input: "last_bn_h/gamma"
941
+ input: "last_bn_h/beta"
942
+ input: "last_bn_h/mean"
943
+ input: "last_bn_h/std"
944
+ attr {
945
+ key: "epsilon"
946
+ value {
947
+ f: 1.00099996416e-05
948
+ }
949
+ }
950
+ }
951
+ node {
952
+ name: "last_scale_h/Mul"
953
+ op: "Mul"
954
+ input: "last_bn_h/FusedBatchNorm"
955
+ input: "last_scale_h/mul"
956
+ }
957
+ node {
958
+ name: "last_scale_h/BiasAdd"
959
+ op: "BiasAdd"
960
+ input: "last_scale_h/Mul"
961
+ input: "last_scale_h/add"
962
+ }
963
+ node {
964
+ name: "last_relu"
965
+ op: "Relu"
966
+ input: "last_scale_h/BiasAdd"
967
+ }
968
+ node {
969
+ name: "conv6_1_h/Conv2D"
970
+ op: "Conv2D"
971
+ input: "last_relu"
972
+ input: "conv6_1_h/weights"
973
+ attr {
974
+ key: "dilations"
975
+ value {
976
+ list {
977
+ i: 1
978
+ i: 1
979
+ i: 1
980
+ i: 1
981
+ }
982
+ }
983
+ }
984
+ attr {
985
+ key: "padding"
986
+ value {
987
+ s: "SAME"
988
+ }
989
+ }
990
+ attr {
991
+ key: "strides"
992
+ value {
993
+ list {
994
+ i: 1
995
+ i: 1
996
+ i: 1
997
+ i: 1
998
+ }
999
+ }
1000
+ }
1001
+ }
1002
+ node {
1003
+ name: "conv6_1_h/BiasAdd"
1004
+ op: "BiasAdd"
1005
+ input: "conv6_1_h/Conv2D"
1006
+ input: "conv6_1_h/bias"
1007
+ }
1008
+ node {
1009
+ name: "conv6_1_h/Relu"
1010
+ op: "Relu"
1011
+ input: "conv6_1_h/BiasAdd"
1012
+ }
1013
+ node {
1014
+ name: "conv6_2_h/Conv2D"
1015
+ op: "Conv2D"
1016
+ input: "conv6_1_h/Relu"
1017
+ input: "conv6_2_h/weights"
1018
+ attr {
1019
+ key: "dilations"
1020
+ value {
1021
+ list {
1022
+ i: 1
1023
+ i: 1
1024
+ i: 1
1025
+ i: 1
1026
+ }
1027
+ }
1028
+ }
1029
+ attr {
1030
+ key: "padding"
1031
+ value {
1032
+ s: "SAME"
1033
+ }
1034
+ }
1035
+ attr {
1036
+ key: "strides"
1037
+ value {
1038
+ list {
1039
+ i: 1
1040
+ i: 2
1041
+ i: 2
1042
+ i: 1
1043
+ }
1044
+ }
1045
+ }
1046
+ }
1047
+ node {
1048
+ name: "conv6_2_h/BiasAdd"
1049
+ op: "BiasAdd"
1050
+ input: "conv6_2_h/Conv2D"
1051
+ input: "conv6_2_h/bias"
1052
+ }
1053
+ node {
1054
+ name: "conv6_2_h/Relu"
1055
+ op: "Relu"
1056
+ input: "conv6_2_h/BiasAdd"
1057
+ }
1058
+ node {
1059
+ name: "conv7_1_h/Conv2D"
1060
+ op: "Conv2D"
1061
+ input: "conv6_2_h/Relu"
1062
+ input: "conv7_1_h/weights"
1063
+ attr {
1064
+ key: "dilations"
1065
+ value {
1066
+ list {
1067
+ i: 1
1068
+ i: 1
1069
+ i: 1
1070
+ i: 1
1071
+ }
1072
+ }
1073
+ }
1074
+ attr {
1075
+ key: "padding"
1076
+ value {
1077
+ s: "SAME"
1078
+ }
1079
+ }
1080
+ attr {
1081
+ key: "strides"
1082
+ value {
1083
+ list {
1084
+ i: 1
1085
+ i: 1
1086
+ i: 1
1087
+ i: 1
1088
+ }
1089
+ }
1090
+ }
1091
+ }
1092
+ node {
1093
+ name: "conv7_1_h/BiasAdd"
1094
+ op: "BiasAdd"
1095
+ input: "conv7_1_h/Conv2D"
1096
+ input: "conv7_1_h/bias"
1097
+ }
1098
+ node {
1099
+ name: "conv7_1_h/Relu"
1100
+ op: "Relu"
1101
+ input: "conv7_1_h/BiasAdd"
1102
+ }
1103
+ node {
1104
+ name: "Pad_2"
1105
+ op: "SpaceToBatchND"
1106
+ input: "conv7_1_h/Relu"
1107
+ input: "SpaceToBatchND/block_shape"
1108
+ input: "SpaceToBatchND_1/paddings"
1109
+ }
1110
+ node {
1111
+ name: "conv7_2_h/Conv2D"
1112
+ op: "Conv2D"
1113
+ input: "Pad_2"
1114
+ input: "conv7_2_h/weights"
1115
+ attr {
1116
+ key: "dilations"
1117
+ value {
1118
+ list {
1119
+ i: 1
1120
+ i: 1
1121
+ i: 1
1122
+ i: 1
1123
+ }
1124
+ }
1125
+ }
1126
+ attr {
1127
+ key: "padding"
1128
+ value {
1129
+ s: "VALID"
1130
+ }
1131
+ }
1132
+ attr {
1133
+ key: "strides"
1134
+ value {
1135
+ list {
1136
+ i: 1
1137
+ i: 2
1138
+ i: 2
1139
+ i: 1
1140
+ }
1141
+ }
1142
+ }
1143
+ }
1144
+ node {
1145
+ name: "conv7_2_h/BiasAdd"
1146
+ op: "BiasAdd"
1147
+ input: "conv7_2_h/Conv2D"
1148
+ input: "conv7_2_h/bias"
1149
+ }
1150
+ node {
1151
+ name: "BatchToSpaceND_2"
1152
+ op: "BatchToSpaceND"
1153
+ input: "conv7_2_h/BiasAdd"
1154
+ }
1155
+ node {
1156
+ name: "conv7_2_h/Relu"
1157
+ op: "Relu"
1158
+ input: "BatchToSpaceND_2"
1159
+ }
1160
+ node {
1161
+ name: "conv8_1_h/Conv2D"
1162
+ op: "Conv2D"
1163
+ input: "conv7_2_h/Relu"
1164
+ input: "conv8_1_h/weights"
1165
+ attr {
1166
+ key: "dilations"
1167
+ value {
1168
+ list {
1169
+ i: 1
1170
+ i: 1
1171
+ i: 1
1172
+ i: 1
1173
+ }
1174
+ }
1175
+ }
1176
+ attr {
1177
+ key: "padding"
1178
+ value {
1179
+ s: "SAME"
1180
+ }
1181
+ }
1182
+ attr {
1183
+ key: "strides"
1184
+ value {
1185
+ list {
1186
+ i: 1
1187
+ i: 1
1188
+ i: 1
1189
+ i: 1
1190
+ }
1191
+ }
1192
+ }
1193
+ }
1194
+ node {
1195
+ name: "conv8_1_h/BiasAdd"
1196
+ op: "BiasAdd"
1197
+ input: "conv8_1_h/Conv2D"
1198
+ input: "conv8_1_h/bias"
1199
+ }
1200
+ node {
1201
+ name: "conv8_1_h/Relu"
1202
+ op: "Relu"
1203
+ input: "conv8_1_h/BiasAdd"
1204
+ }
1205
+ node {
1206
+ name: "conv8_2_h/Conv2D"
1207
+ op: "Conv2D"
1208
+ input: "conv8_1_h/Relu"
1209
+ input: "conv8_2_h/weights"
1210
+ attr {
1211
+ key: "dilations"
1212
+ value {
1213
+ list {
1214
+ i: 1
1215
+ i: 1
1216
+ i: 1
1217
+ i: 1
1218
+ }
1219
+ }
1220
+ }
1221
+ attr {
1222
+ key: "padding"
1223
+ value {
1224
+ s: "SAME"
1225
+ }
1226
+ }
1227
+ attr {
1228
+ key: "strides"
1229
+ value {
1230
+ list {
1231
+ i: 1
1232
+ i: 1
1233
+ i: 1
1234
+ i: 1
1235
+ }
1236
+ }
1237
+ }
1238
+ }
1239
+ node {
1240
+ name: "conv8_2_h/BiasAdd"
1241
+ op: "BiasAdd"
1242
+ input: "conv8_2_h/Conv2D"
1243
+ input: "conv8_2_h/bias"
1244
+ }
1245
+ node {
1246
+ name: "conv8_2_h/Relu"
1247
+ op: "Relu"
1248
+ input: "conv8_2_h/BiasAdd"
1249
+ }
1250
+ node {
1251
+ name: "conv9_1_h/Conv2D"
1252
+ op: "Conv2D"
1253
+ input: "conv8_2_h/Relu"
1254
+ input: "conv9_1_h/weights"
1255
+ attr {
1256
+ key: "dilations"
1257
+ value {
1258
+ list {
1259
+ i: 1
1260
+ i: 1
1261
+ i: 1
1262
+ i: 1
1263
+ }
1264
+ }
1265
+ }
1266
+ attr {
1267
+ key: "padding"
1268
+ value {
1269
+ s: "SAME"
1270
+ }
1271
+ }
1272
+ attr {
1273
+ key: "strides"
1274
+ value {
1275
+ list {
1276
+ i: 1
1277
+ i: 1
1278
+ i: 1
1279
+ i: 1
1280
+ }
1281
+ }
1282
+ }
1283
+ }
1284
+ node {
1285
+ name: "conv9_1_h/BiasAdd"
1286
+ op: "BiasAdd"
1287
+ input: "conv9_1_h/Conv2D"
1288
+ input: "conv9_1_h/bias"
1289
+ }
1290
+ node {
1291
+ name: "conv9_1_h/Relu"
1292
+ op: "Relu"
1293
+ input: "conv9_1_h/BiasAdd"
1294
+ }
1295
+ node {
1296
+ name: "conv9_2_h/Conv2D"
1297
+ op: "Conv2D"
1298
+ input: "conv9_1_h/Relu"
1299
+ input: "conv9_2_h/weights"
1300
+ attr {
1301
+ key: "dilations"
1302
+ value {
1303
+ list {
1304
+ i: 1
1305
+ i: 1
1306
+ i: 1
1307
+ i: 1
1308
+ }
1309
+ }
1310
+ }
1311
+ attr {
1312
+ key: "padding"
1313
+ value {
1314
+ s: "SAME"
1315
+ }
1316
+ }
1317
+ attr {
1318
+ key: "strides"
1319
+ value {
1320
+ list {
1321
+ i: 1
1322
+ i: 1
1323
+ i: 1
1324
+ i: 1
1325
+ }
1326
+ }
1327
+ }
1328
+ }
1329
+ node {
1330
+ name: "conv9_2_h/BiasAdd"
1331
+ op: "BiasAdd"
1332
+ input: "conv9_2_h/Conv2D"
1333
+ input: "conv9_2_h/bias"
1334
+ }
1335
+ node {
1336
+ name: "conv9_2_h/Relu"
1337
+ op: "Relu"
1338
+ input: "conv9_2_h/BiasAdd"
1339
+ }
1340
+ node {
1341
+ name: "conv9_2_mbox_loc/Conv2D"
1342
+ op: "Conv2D"
1343
+ input: "conv9_2_h/Relu"
1344
+ input: "conv9_2_mbox_loc/weights"
1345
+ attr {
1346
+ key: "dilations"
1347
+ value {
1348
+ list {
1349
+ i: 1
1350
+ i: 1
1351
+ i: 1
1352
+ i: 1
1353
+ }
1354
+ }
1355
+ }
1356
+ attr {
1357
+ key: "padding"
1358
+ value {
1359
+ s: "SAME"
1360
+ }
1361
+ }
1362
+ attr {
1363
+ key: "strides"
1364
+ value {
1365
+ list {
1366
+ i: 1
1367
+ i: 1
1368
+ i: 1
1369
+ i: 1
1370
+ }
1371
+ }
1372
+ }
1373
+ }
1374
+ node {
1375
+ name: "conv9_2_mbox_loc/BiasAdd"
1376
+ op: "BiasAdd"
1377
+ input: "conv9_2_mbox_loc/Conv2D"
1378
+ input: "conv9_2_mbox_loc/bias"
1379
+ }
1380
+ node {
1381
+ name: "flatten_5/Reshape"
1382
+ op: "Flatten"
1383
+ input: "conv9_2_mbox_loc/BiasAdd"
1384
+ }
1385
+ node {
1386
+ name: "conv9_2_mbox_conf/Conv2D"
1387
+ op: "Conv2D"
1388
+ input: "conv9_2_h/Relu"
1389
+ input: "conv9_2_mbox_conf/weights"
1390
+ attr {
1391
+ key: "dilations"
1392
+ value {
1393
+ list {
1394
+ i: 1
1395
+ i: 1
1396
+ i: 1
1397
+ i: 1
1398
+ }
1399
+ }
1400
+ }
1401
+ attr {
1402
+ key: "padding"
1403
+ value {
1404
+ s: "SAME"
1405
+ }
1406
+ }
1407
+ attr {
1408
+ key: "strides"
1409
+ value {
1410
+ list {
1411
+ i: 1
1412
+ i: 1
1413
+ i: 1
1414
+ i: 1
1415
+ }
1416
+ }
1417
+ }
1418
+ }
1419
+ node {
1420
+ name: "conv9_2_mbox_conf/BiasAdd"
1421
+ op: "BiasAdd"
1422
+ input: "conv9_2_mbox_conf/Conv2D"
1423
+ input: "conv9_2_mbox_conf/bias"
1424
+ }
1425
+ node {
1426
+ name: "flatten_11/Reshape"
1427
+ op: "Flatten"
1428
+ input: "conv9_2_mbox_conf/BiasAdd"
1429
+ }
1430
+ node {
1431
+ name: "conv8_2_mbox_loc/Conv2D"
1432
+ op: "Conv2D"
1433
+ input: "conv8_2_h/Relu"
1434
+ input: "conv8_2_mbox_loc/weights"
1435
+ attr {
1436
+ key: "dilations"
1437
+ value {
1438
+ list {
1439
+ i: 1
1440
+ i: 1
1441
+ i: 1
1442
+ i: 1
1443
+ }
1444
+ }
1445
+ }
1446
+ attr {
1447
+ key: "padding"
1448
+ value {
1449
+ s: "SAME"
1450
+ }
1451
+ }
1452
+ attr {
1453
+ key: "strides"
1454
+ value {
1455
+ list {
1456
+ i: 1
1457
+ i: 1
1458
+ i: 1
1459
+ i: 1
1460
+ }
1461
+ }
1462
+ }
1463
+ }
1464
+ node {
1465
+ name: "conv8_2_mbox_loc/BiasAdd"
1466
+ op: "BiasAdd"
1467
+ input: "conv8_2_mbox_loc/Conv2D"
1468
+ input: "conv8_2_mbox_loc/bias"
1469
+ }
1470
+ node {
1471
+ name: "flatten_4/Reshape"
1472
+ op: "Flatten"
1473
+ input: "conv8_2_mbox_loc/BiasAdd"
1474
+ }
1475
+ node {
1476
+ name: "conv8_2_mbox_conf/Conv2D"
1477
+ op: "Conv2D"
1478
+ input: "conv8_2_h/Relu"
1479
+ input: "conv8_2_mbox_conf/weights"
1480
+ attr {
1481
+ key: "dilations"
1482
+ value {
1483
+ list {
1484
+ i: 1
1485
+ i: 1
1486
+ i: 1
1487
+ i: 1
1488
+ }
1489
+ }
1490
+ }
1491
+ attr {
1492
+ key: "padding"
1493
+ value {
1494
+ s: "SAME"
1495
+ }
1496
+ }
1497
+ attr {
1498
+ key: "strides"
1499
+ value {
1500
+ list {
1501
+ i: 1
1502
+ i: 1
1503
+ i: 1
1504
+ i: 1
1505
+ }
1506
+ }
1507
+ }
1508
+ }
1509
+ node {
1510
+ name: "conv8_2_mbox_conf/BiasAdd"
1511
+ op: "BiasAdd"
1512
+ input: "conv8_2_mbox_conf/Conv2D"
1513
+ input: "conv8_2_mbox_conf/bias"
1514
+ }
1515
+ node {
1516
+ name: "flatten_10/Reshape"
1517
+ op: "Flatten"
1518
+ input: "conv8_2_mbox_conf/BiasAdd"
1519
+ }
1520
+ node {
1521
+ name: "conv7_2_mbox_loc/Conv2D"
1522
+ op: "Conv2D"
1523
+ input: "conv7_2_h/Relu"
1524
+ input: "conv7_2_mbox_loc/weights"
1525
+ attr {
1526
+ key: "dilations"
1527
+ value {
1528
+ list {
1529
+ i: 1
1530
+ i: 1
1531
+ i: 1
1532
+ i: 1
1533
+ }
1534
+ }
1535
+ }
1536
+ attr {
1537
+ key: "padding"
1538
+ value {
1539
+ s: "SAME"
1540
+ }
1541
+ }
1542
+ attr {
1543
+ key: "strides"
1544
+ value {
1545
+ list {
1546
+ i: 1
1547
+ i: 1
1548
+ i: 1
1549
+ i: 1
1550
+ }
1551
+ }
1552
+ }
1553
+ }
1554
+ node {
1555
+ name: "conv7_2_mbox_loc/BiasAdd"
1556
+ op: "BiasAdd"
1557
+ input: "conv7_2_mbox_loc/Conv2D"
1558
+ input: "conv7_2_mbox_loc/bias"
1559
+ }
1560
+ node {
1561
+ name: "flatten_3/Reshape"
1562
+ op: "Flatten"
1563
+ input: "conv7_2_mbox_loc/BiasAdd"
1564
+ }
1565
+ node {
1566
+ name: "conv7_2_mbox_conf/Conv2D"
1567
+ op: "Conv2D"
1568
+ input: "conv7_2_h/Relu"
1569
+ input: "conv7_2_mbox_conf/weights"
1570
+ attr {
1571
+ key: "dilations"
1572
+ value {
1573
+ list {
1574
+ i: 1
1575
+ i: 1
1576
+ i: 1
1577
+ i: 1
1578
+ }
1579
+ }
1580
+ }
1581
+ attr {
1582
+ key: "padding"
1583
+ value {
1584
+ s: "SAME"
1585
+ }
1586
+ }
1587
+ attr {
1588
+ key: "strides"
1589
+ value {
1590
+ list {
1591
+ i: 1
1592
+ i: 1
1593
+ i: 1
1594
+ i: 1
1595
+ }
1596
+ }
1597
+ }
1598
+ }
1599
+ node {
1600
+ name: "conv7_2_mbox_conf/BiasAdd"
1601
+ op: "BiasAdd"
1602
+ input: "conv7_2_mbox_conf/Conv2D"
1603
+ input: "conv7_2_mbox_conf/bias"
1604
+ }
1605
+ node {
1606
+ name: "flatten_9/Reshape"
1607
+ op: "Flatten"
1608
+ input: "conv7_2_mbox_conf/BiasAdd"
1609
+ }
1610
+ node {
1611
+ name: "conv6_2_mbox_loc/Conv2D"
1612
+ op: "Conv2D"
1613
+ input: "conv6_2_h/Relu"
1614
+ input: "conv6_2_mbox_loc/weights"
1615
+ attr {
1616
+ key: "dilations"
1617
+ value {
1618
+ list {
1619
+ i: 1
1620
+ i: 1
1621
+ i: 1
1622
+ i: 1
1623
+ }
1624
+ }
1625
+ }
1626
+ attr {
1627
+ key: "padding"
1628
+ value {
1629
+ s: "SAME"
1630
+ }
1631
+ }
1632
+ attr {
1633
+ key: "strides"
1634
+ value {
1635
+ list {
1636
+ i: 1
1637
+ i: 1
1638
+ i: 1
1639
+ i: 1
1640
+ }
1641
+ }
1642
+ }
1643
+ }
1644
+ node {
1645
+ name: "conv6_2_mbox_loc/BiasAdd"
1646
+ op: "BiasAdd"
1647
+ input: "conv6_2_mbox_loc/Conv2D"
1648
+ input: "conv6_2_mbox_loc/bias"
1649
+ }
1650
+ node {
1651
+ name: "flatten_2/Reshape"
1652
+ op: "Flatten"
1653
+ input: "conv6_2_mbox_loc/BiasAdd"
1654
+ }
1655
+ node {
1656
+ name: "conv6_2_mbox_conf/Conv2D"
1657
+ op: "Conv2D"
1658
+ input: "conv6_2_h/Relu"
1659
+ input: "conv6_2_mbox_conf/weights"
1660
+ attr {
1661
+ key: "dilations"
1662
+ value {
1663
+ list {
1664
+ i: 1
1665
+ i: 1
1666
+ i: 1
1667
+ i: 1
1668
+ }
1669
+ }
1670
+ }
1671
+ attr {
1672
+ key: "padding"
1673
+ value {
1674
+ s: "SAME"
1675
+ }
1676
+ }
1677
+ attr {
1678
+ key: "strides"
1679
+ value {
1680
+ list {
1681
+ i: 1
1682
+ i: 1
1683
+ i: 1
1684
+ i: 1
1685
+ }
1686
+ }
1687
+ }
1688
+ }
1689
+ node {
1690
+ name: "conv6_2_mbox_conf/BiasAdd"
1691
+ op: "BiasAdd"
1692
+ input: "conv6_2_mbox_conf/Conv2D"
1693
+ input: "conv6_2_mbox_conf/bias"
1694
+ }
1695
+ node {
1696
+ name: "flatten_8/Reshape"
1697
+ op: "Flatten"
1698
+ input: "conv6_2_mbox_conf/BiasAdd"
1699
+ }
1700
+ node {
1701
+ name: "fc7_mbox_loc/Conv2D"
1702
+ op: "Conv2D"
1703
+ input: "last_relu"
1704
+ input: "fc7_mbox_loc/weights"
1705
+ attr {
1706
+ key: "dilations"
1707
+ value {
1708
+ list {
1709
+ i: 1
1710
+ i: 1
1711
+ i: 1
1712
+ i: 1
1713
+ }
1714
+ }
1715
+ }
1716
+ attr {
1717
+ key: "padding"
1718
+ value {
1719
+ s: "SAME"
1720
+ }
1721
+ }
1722
+ attr {
1723
+ key: "strides"
1724
+ value {
1725
+ list {
1726
+ i: 1
1727
+ i: 1
1728
+ i: 1
1729
+ i: 1
1730
+ }
1731
+ }
1732
+ }
1733
+ }
1734
+ node {
1735
+ name: "fc7_mbox_loc/BiasAdd"
1736
+ op: "BiasAdd"
1737
+ input: "fc7_mbox_loc/Conv2D"
1738
+ input: "fc7_mbox_loc/bias"
1739
+ }
1740
+ node {
1741
+ name: "flatten_1/Reshape"
1742
+ op: "Flatten"
1743
+ input: "fc7_mbox_loc/BiasAdd"
1744
+ }
1745
+ node {
1746
+ name: "mbox_loc"
1747
+ op: "ConcatV2"
1748
+ input: "flatten/Reshape"
1749
+ input: "flatten_1/Reshape"
1750
+ input: "flatten_2/Reshape"
1751
+ input: "flatten_3/Reshape"
1752
+ input: "flatten_4/Reshape"
1753
+ input: "flatten_5/Reshape"
1754
+ input: "mbox_loc/axis"
1755
+ }
1756
+ node {
1757
+ name: "fc7_mbox_conf/Conv2D"
1758
+ op: "Conv2D"
1759
+ input: "last_relu"
1760
+ input: "fc7_mbox_conf/weights"
1761
+ attr {
1762
+ key: "dilations"
1763
+ value {
1764
+ list {
1765
+ i: 1
1766
+ i: 1
1767
+ i: 1
1768
+ i: 1
1769
+ }
1770
+ }
1771
+ }
1772
+ attr {
1773
+ key: "padding"
1774
+ value {
1775
+ s: "SAME"
1776
+ }
1777
+ }
1778
+ attr {
1779
+ key: "strides"
1780
+ value {
1781
+ list {
1782
+ i: 1
1783
+ i: 1
1784
+ i: 1
1785
+ i: 1
1786
+ }
1787
+ }
1788
+ }
1789
+ }
1790
+ node {
1791
+ name: "fc7_mbox_conf/BiasAdd"
1792
+ op: "BiasAdd"
1793
+ input: "fc7_mbox_conf/Conv2D"
1794
+ input: "fc7_mbox_conf/bias"
1795
+ }
1796
+ node {
1797
+ name: "flatten_7/Reshape"
1798
+ op: "Flatten"
1799
+ input: "fc7_mbox_conf/BiasAdd"
1800
+ }
1801
+ node {
1802
+ name: "mbox_conf"
1803
+ op: "ConcatV2"
1804
+ input: "flatten_6/Reshape"
1805
+ input: "flatten_7/Reshape"
1806
+ input: "flatten_8/Reshape"
1807
+ input: "flatten_9/Reshape"
1808
+ input: "flatten_10/Reshape"
1809
+ input: "flatten_11/Reshape"
1810
+ input: "mbox_conf/axis"
1811
+ }
1812
+ node {
1813
+ name: "mbox_conf_reshape"
1814
+ op: "Reshape"
1815
+ input: "mbox_conf"
1816
+ input: "reshape_before_softmax"
1817
+ }
1818
+ node {
1819
+ name: "mbox_conf_softmax"
1820
+ op: "Softmax"
1821
+ input: "mbox_conf_reshape"
1822
+ attr {
1823
+ key: "axis"
1824
+ value {
1825
+ i: 2
1826
+ }
1827
+ }
1828
+ }
1829
+ node {
1830
+ name: "mbox_conf_flatten"
1831
+ op: "Flatten"
1832
+ input: "mbox_conf_softmax"
1833
+ }
1834
+ node {
1835
+ name: "PriorBox_0"
1836
+ op: "PriorBox"
1837
+ input: "conv4_3_norm/mul_1"
1838
+ input: "data"
1839
+ attr {
1840
+ key: "aspect_ratio"
1841
+ value {
1842
+ tensor {
1843
+ dtype: DT_FLOAT
1844
+ tensor_shape {
1845
+ dim {
1846
+ size: 1
1847
+ }
1848
+ }
1849
+ float_val: 2.0
1850
+ }
1851
+ }
1852
+ }
1853
+ attr {
1854
+ key: "clip"
1855
+ value {
1856
+ b: false
1857
+ }
1858
+ }
1859
+ attr {
1860
+ key: "flip"
1861
+ value {
1862
+ b: true
1863
+ }
1864
+ }
1865
+ attr {
1866
+ key: "max_size"
1867
+ value {
1868
+ i: 60
1869
+ }
1870
+ }
1871
+ attr {
1872
+ key: "min_size"
1873
+ value {
1874
+ i: 30
1875
+ }
1876
+ }
1877
+ attr {
1878
+ key: "offset"
1879
+ value {
1880
+ f: 0.5
1881
+ }
1882
+ }
1883
+ attr {
1884
+ key: "step"
1885
+ value {
1886
+ f: 8.0
1887
+ }
1888
+ }
1889
+ attr {
1890
+ key: "variance"
1891
+ value {
1892
+ tensor {
1893
+ dtype: DT_FLOAT
1894
+ tensor_shape {
1895
+ dim {
1896
+ size: 4
1897
+ }
1898
+ }
1899
+ float_val: 0.10000000149
1900
+ float_val: 0.10000000149
1901
+ float_val: 0.20000000298
1902
+ float_val: 0.20000000298
1903
+ }
1904
+ }
1905
+ }
1906
+ }
1907
+ node {
1908
+ name: "PriorBox_1"
1909
+ op: "PriorBox"
1910
+ input: "last_relu"
1911
+ input: "data"
1912
+ attr {
1913
+ key: "aspect_ratio"
1914
+ value {
1915
+ tensor {
1916
+ dtype: DT_FLOAT
1917
+ tensor_shape {
1918
+ dim {
1919
+ size: 2
1920
+ }
1921
+ }
1922
+ float_val: 2.0
1923
+ float_val: 3.0
1924
+ }
1925
+ }
1926
+ }
1927
+ attr {
1928
+ key: "clip"
1929
+ value {
1930
+ b: false
1931
+ }
1932
+ }
1933
+ attr {
1934
+ key: "flip"
1935
+ value {
1936
+ b: true
1937
+ }
1938
+ }
1939
+ attr {
1940
+ key: "max_size"
1941
+ value {
1942
+ i: 111
1943
+ }
1944
+ }
1945
+ attr {
1946
+ key: "min_size"
1947
+ value {
1948
+ i: 60
1949
+ }
1950
+ }
1951
+ attr {
1952
+ key: "offset"
1953
+ value {
1954
+ f: 0.5
1955
+ }
1956
+ }
1957
+ attr {
1958
+ key: "step"
1959
+ value {
1960
+ f: 16.0
1961
+ }
1962
+ }
1963
+ attr {
1964
+ key: "variance"
1965
+ value {
1966
+ tensor {
1967
+ dtype: DT_FLOAT
1968
+ tensor_shape {
1969
+ dim {
1970
+ size: 4
1971
+ }
1972
+ }
1973
+ float_val: 0.10000000149
1974
+ float_val: 0.10000000149
1975
+ float_val: 0.20000000298
1976
+ float_val: 0.20000000298
1977
+ }
1978
+ }
1979
+ }
1980
+ }
1981
+ node {
1982
+ name: "PriorBox_2"
1983
+ op: "PriorBox"
1984
+ input: "conv6_2_h/Relu"
1985
+ input: "data"
1986
+ attr {
1987
+ key: "aspect_ratio"
1988
+ value {
1989
+ tensor {
1990
+ dtype: DT_FLOAT
1991
+ tensor_shape {
1992
+ dim {
1993
+ size: 2
1994
+ }
1995
+ }
1996
+ float_val: 2.0
1997
+ float_val: 3.0
1998
+ }
1999
+ }
2000
+ }
2001
+ attr {
2002
+ key: "clip"
2003
+ value {
2004
+ b: false
2005
+ }
2006
+ }
2007
+ attr {
2008
+ key: "flip"
2009
+ value {
2010
+ b: true
2011
+ }
2012
+ }
2013
+ attr {
2014
+ key: "max_size"
2015
+ value {
2016
+ i: 162
2017
+ }
2018
+ }
2019
+ attr {
2020
+ key: "min_size"
2021
+ value {
2022
+ i: 111
2023
+ }
2024
+ }
2025
+ attr {
2026
+ key: "offset"
2027
+ value {
2028
+ f: 0.5
2029
+ }
2030
+ }
2031
+ attr {
2032
+ key: "step"
2033
+ value {
2034
+ f: 32.0
2035
+ }
2036
+ }
2037
+ attr {
2038
+ key: "variance"
2039
+ value {
2040
+ tensor {
2041
+ dtype: DT_FLOAT
2042
+ tensor_shape {
2043
+ dim {
2044
+ size: 4
2045
+ }
2046
+ }
2047
+ float_val: 0.10000000149
2048
+ float_val: 0.10000000149
2049
+ float_val: 0.20000000298
2050
+ float_val: 0.20000000298
2051
+ }
2052
+ }
2053
+ }
2054
+ }
2055
+ node {
2056
+ name: "PriorBox_3"
2057
+ op: "PriorBox"
2058
+ input: "conv7_2_h/Relu"
2059
+ input: "data"
2060
+ attr {
2061
+ key: "aspect_ratio"
2062
+ value {
2063
+ tensor {
2064
+ dtype: DT_FLOAT
2065
+ tensor_shape {
2066
+ dim {
2067
+ size: 2
2068
+ }
2069
+ }
2070
+ float_val: 2.0
2071
+ float_val: 3.0
2072
+ }
2073
+ }
2074
+ }
2075
+ attr {
2076
+ key: "clip"
2077
+ value {
2078
+ b: false
2079
+ }
2080
+ }
2081
+ attr {
2082
+ key: "flip"
2083
+ value {
2084
+ b: true
2085
+ }
2086
+ }
2087
+ attr {
2088
+ key: "max_size"
2089
+ value {
2090
+ i: 213
2091
+ }
2092
+ }
2093
+ attr {
2094
+ key: "min_size"
2095
+ value {
2096
+ i: 162
2097
+ }
2098
+ }
2099
+ attr {
2100
+ key: "offset"
2101
+ value {
2102
+ f: 0.5
2103
+ }
2104
+ }
2105
+ attr {
2106
+ key: "step"
2107
+ value {
2108
+ f: 64.0
2109
+ }
2110
+ }
2111
+ attr {
2112
+ key: "variance"
2113
+ value {
2114
+ tensor {
2115
+ dtype: DT_FLOAT
2116
+ tensor_shape {
2117
+ dim {
2118
+ size: 4
2119
+ }
2120
+ }
2121
+ float_val: 0.10000000149
2122
+ float_val: 0.10000000149
2123
+ float_val: 0.20000000298
2124
+ float_val: 0.20000000298
2125
+ }
2126
+ }
2127
+ }
2128
+ }
2129
+ node {
2130
+ name: "PriorBox_4"
2131
+ op: "PriorBox"
2132
+ input: "conv8_2_h/Relu"
2133
+ input: "data"
2134
+ attr {
2135
+ key: "aspect_ratio"
2136
+ value {
2137
+ tensor {
2138
+ dtype: DT_FLOAT
2139
+ tensor_shape {
2140
+ dim {
2141
+ size: 1
2142
+ }
2143
+ }
2144
+ float_val: 2.0
2145
+ }
2146
+ }
2147
+ }
2148
+ attr {
2149
+ key: "clip"
2150
+ value {
2151
+ b: false
2152
+ }
2153
+ }
2154
+ attr {
2155
+ key: "flip"
2156
+ value {
2157
+ b: true
2158
+ }
2159
+ }
2160
+ attr {
2161
+ key: "max_size"
2162
+ value {
2163
+ i: 264
2164
+ }
2165
+ }
2166
+ attr {
2167
+ key: "min_size"
2168
+ value {
2169
+ i: 213
2170
+ }
2171
+ }
2172
+ attr {
2173
+ key: "offset"
2174
+ value {
2175
+ f: 0.5
2176
+ }
2177
+ }
2178
+ attr {
2179
+ key: "step"
2180
+ value {
2181
+ f: 100.0
2182
+ }
2183
+ }
2184
+ attr {
2185
+ key: "variance"
2186
+ value {
2187
+ tensor {
2188
+ dtype: DT_FLOAT
2189
+ tensor_shape {
2190
+ dim {
2191
+ size: 4
2192
+ }
2193
+ }
2194
+ float_val: 0.10000000149
2195
+ float_val: 0.10000000149
2196
+ float_val: 0.20000000298
2197
+ float_val: 0.20000000298
2198
+ }
2199
+ }
2200
+ }
2201
+ }
2202
+ node {
2203
+ name: "PriorBox_5"
2204
+ op: "PriorBox"
2205
+ input: "conv9_2_h/Relu"
2206
+ input: "data"
2207
+ attr {
2208
+ key: "aspect_ratio"
2209
+ value {
2210
+ tensor {
2211
+ dtype: DT_FLOAT
2212
+ tensor_shape {
2213
+ dim {
2214
+ size: 1
2215
+ }
2216
+ }
2217
+ float_val: 2.0
2218
+ }
2219
+ }
2220
+ }
2221
+ attr {
2222
+ key: "clip"
2223
+ value {
2224
+ b: false
2225
+ }
2226
+ }
2227
+ attr {
2228
+ key: "flip"
2229
+ value {
2230
+ b: true
2231
+ }
2232
+ }
2233
+ attr {
2234
+ key: "max_size"
2235
+ value {
2236
+ i: 315
2237
+ }
2238
+ }
2239
+ attr {
2240
+ key: "min_size"
2241
+ value {
2242
+ i: 264
2243
+ }
2244
+ }
2245
+ attr {
2246
+ key: "offset"
2247
+ value {
2248
+ f: 0.5
2249
+ }
2250
+ }
2251
+ attr {
2252
+ key: "step"
2253
+ value {
2254
+ f: 300.0
2255
+ }
2256
+ }
2257
+ attr {
2258
+ key: "variance"
2259
+ value {
2260
+ tensor {
2261
+ dtype: DT_FLOAT
2262
+ tensor_shape {
2263
+ dim {
2264
+ size: 4
2265
+ }
2266
+ }
2267
+ float_val: 0.10000000149
2268
+ float_val: 0.10000000149
2269
+ float_val: 0.20000000298
2270
+ float_val: 0.20000000298
2271
+ }
2272
+ }
2273
+ }
2274
+ }
2275
+ node {
2276
+ name: "mbox_priorbox"
2277
+ op: "ConcatV2"
2278
+ input: "PriorBox_0"
2279
+ input: "PriorBox_1"
2280
+ input: "PriorBox_2"
2281
+ input: "PriorBox_3"
2282
+ input: "PriorBox_4"
2283
+ input: "PriorBox_5"
2284
+ input: "mbox_loc/axis"
2285
+ }
2286
+ node {
2287
+ name: "detection_out"
2288
+ op: "DetectionOutput"
2289
+ input: "mbox_loc"
2290
+ input: "mbox_conf_flatten"
2291
+ input: "mbox_priorbox"
2292
+ attr {
2293
+ key: "background_label_id"
2294
+ value {
2295
+ i: 0
2296
+ }
2297
+ }
2298
+ attr {
2299
+ key: "code_type"
2300
+ value {
2301
+ s: "CENTER_SIZE"
2302
+ }
2303
+ }
2304
+ attr {
2305
+ key: "confidence_threshold"
2306
+ value {
2307
+ f: 0.00999999977648
2308
+ }
2309
+ }
2310
+ attr {
2311
+ key: "keep_top_k"
2312
+ value {
2313
+ i: 200
2314
+ }
2315
+ }
2316
+ attr {
2317
+ key: "nms_threshold"
2318
+ value {
2319
+ f: 0.449999988079
2320
+ }
2321
+ }
2322
+ attr {
2323
+ key: "num_classes"
2324
+ value {
2325
+ i: 2
2326
+ }
2327
+ }
2328
+ attr {
2329
+ key: "share_location"
2330
+ value {
2331
+ b: true
2332
+ }
2333
+ }
2334
+ attr {
2335
+ key: "top_k"
2336
+ value {
2337
+ i: 400
2338
+ }
2339
+ }
2340
+ }
2341
+ node {
2342
+ name: "reshape_before_softmax"
2343
+ op: "Const"
2344
+ attr {
2345
+ key: "value"
2346
+ value {
2347
+ tensor {
2348
+ dtype: DT_INT32
2349
+ tensor_shape {
2350
+ dim {
2351
+ size: 3
2352
+ }
2353
+ }
2354
+ int_val: 0
2355
+ int_val: -1
2356
+ int_val: 2
2357
+ }
2358
+ }
2359
+ }
2360
+ }
2361
+ library {
2362
+ }
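The graph above only describes the detector's topology; the weights live in the opencv_face_detector_uint8.pb file added below. A quick, optional sanity check is to load the pair with OpenCV's dnn module and confirm that the detection_out node defined here is exposed as the output layer (paths are the ones used elsewhere in this repo; the snippet is only a sketch, not part of the commit):

import cv2

# readNet takes the binary weights first and the .pbtxt graph description second
net = cv2.dnn.readNet('emotion_recognition/models/opencv_face_detector_uint8.pb',
                      'emotion_recognition/models/opencv_face_detector.pbtxt')
print(net.getUnconnectedOutLayersNames())  # expected to report the 'detection_out' node defined above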
emotion_recognition/models/opencv_face_detector_uint8.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c71d752ef2cbf2f457ac82fdd580fcb2522fd04c5efdaed18eb6d9e2843fbed
+ size 2727750
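Note that this is only a Git LFS pointer (spec version, object id, size), not the 2.7 MB model itself; a clone made without git-lfs support ends up with this three-line stub, and cv2.dnn.readNet will then fail to parse the file. Running "git lfs install" once and then "git lfs pull" inside the repository fetches the actual binary.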
main.py ADDED
@@ -0,0 +1,276 @@
+ from flask import Flask, jsonify, request
+ import os
+ # from "./emotion_recognition/ai_model_photo" import ai
+ from emotion_recognition import ai_model_photo
+ # from palm_response import getResponse, init_connect
+ from flask_mysqldb import MySQL
+ import base64
+ import google.generativeai as genai
+
+ app = Flask(__name__)
+
+ app.config['MYSQL_HOST'] = 'localhost'
+ app.config['MYSQL_USER'] = 'root'
+ app.config['MYSQL_PASSWORD'] = ''
+ app.config['MYSQL_DB'] = 'wanees_app'
+
+ mysql = MySQL(app)
+
+
+ def add_padding(base64_str):
+     missing_padding = len(base64_str) % 4
+     if missing_padding != 0:
+         base64_str += '=' * (4 - missing_padding)
+     return base64_str
+
+
+ @app.route('/api/chat/gemini', methods=['POST'])
+ def receive_chat():
+     try:
+         data = request.get_json()
+         prompt = data['message']
+         print(data)
+         print('prompt is: ', prompt)
+         genai.configure(api_key="AIzaSyB7bKpGzkT_f32d-Xv4tb9zIBtb8vJUCCs")
+         model = genai.GenerativeModel('gemini-pro')
+         response = model.generate_content(prompt)
+         print(response.text)
+
+         generated_text = response.text
+         print('generated_text: ', generated_text)
+         return jsonify({"response": generated_text}), 200
+     except Exception as e:
+         print('the catch is:', str(e))
+         return jsonify({"error": str(e)})
+
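For a quick local smoke test of the route above, the request body just needs a message field; a sketch using requests (the URL and port follow the app.run call at the bottom of this file and are otherwise an assumption):

import requests

resp = requests.post('http://localhost:5000/api/chat/gemini', json={'message': 'Hello'})
print(resp.json())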
+ # openai.api_key = "sk-proj-QtkCbMdxHjgMAyughGjgT3BlbkFJuoyNuhK2zO3uKm1Zc5S6"
+
+ # @backoff.on_exception(backoff.expo, RateLimitError, max_tries=8)
+ # def completions_with_backoff(client, messages):
+ #     response = client.chat.completions.create(
+ #         model="gpt-3.5-turbo",
+ #         messages=messages,
+ #         temperature=0,
+ #     )
+ #     return response
+
+
+ # @app.route('/api/chat/gpt', methods=['POST'])
+ # def receive_message():
+ #     try:
+ #         data = request.get_json()
+ #         prompt = data.get('prompt', 'Hello')
+ #         print(data)
+ #         print('promp is : ', prompt)
+ #         # client = OpenAI()
+ #         messages = [{"role": "user", "content": prompt}]
+ #         client = OpenAI(
+ #             # This is the default and can be omitted
+ #             api_key="sk-proj-EPQes2YgSJEx1e6EwDarT3BlbkFJrEtY36IDssJKi6VkVP7v"
+ #         )
+ #         #
+ #         response = client.chat.completions.create(
+ #             model="gpt-3.5-turbo",
+ #             messages=messages,
+ #             temperature=0,
+ #         )
+
+ #         # response = openai.Completion.create(
+ #         #     engine="text-davinci-003",
+ #         #     prompt=prompt,
+ #         #     max_tokens=251,
+ #         #     temperature=0,
+ #         #     top_p=1
+ #         # )
+ #         print('response is: ', response.choices[0].message["content"])
+
+ #         generated_text = response.choices[0].message["content"]
+ #         print('generated_text: ', generated_text)
+ #         return jsonify({"response": generated_text}), 200
+ #     except Exception as e:
+ #         print('the catch is:', str(e))
+ #         return jsonify({"error": str(e)})
+
+
+ @app.route('/api/photo', methods=['POST'])
+ def receive_photo():
+     global photo_path
+     photo_data = request.files['photo']
+     photo_path = os.path.join('.', photo_data.filename)
+     photo_data.save(photo_path)
+     res = ai_model_photo.ai(photo_path)
+     print(photo_path)
+     return jsonify({'message': res}), 200
+     # return Response(status=200, mimetype='application/json')
+
+
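Because /api/photo reads request.files['photo'], clients have to send multipart/form-data rather than JSON; a minimal client-side sketch (the file name is illustrative):

import requests

with open('face.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/api/photo', files={'photo': f})
print(resp.json())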
+ @app.route('/api/facedetect', methods=['POST'])
+ def AddPhoto():
+     name = request.json['name']
+     email = request.json['email']
+     # mobile = request.json['mobile']
+     img64 = request.json['image']
+     img64_padded = add_padding(img64)
+     imageBinary = base64.b64decode(img64_padded)
+
+     imgdetect = request.json['imagedetect']
+     imgdetect_padded = add_padding(imgdetect)
+     imageBinarydetect = base64.b64decode(imgdetect_padded)
+     # the decoded bytes carry no filename, so write them to a fixed path before
+     # passing them to the emotion model (the file name here is an arbitrary choice)
+     photo_path = os.path.join('.', 'facedetect_input.jpg')
+     with open(photo_path, 'wb') as photo_file:
+         photo_file.write(imageBinarydetect)
+     res = ai_model_photo.ai(photo_path)
+     print(photo_path)
+
+     # def receive_photo():
+     #     global photo_path
+     #     photo_data = request.files['photo']
+     #     photo_path = os.path.join('.', photo_data.filename)
+     #     photo_data.save(photo_path)
+     #     res = ai_model_photo.ai(photo_path)
+     #     print(photo_path)
+     return jsonify({'message': res}), 200
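Unlike /api/photo, this route expects the images as base64 strings inside a JSON body; a sketch of a matching client call (field values and the file name are placeholders):

import base64
import requests

with open('face.jpg', 'rb') as f:
    img_b64 = base64.b64encode(f.read()).decode('utf-8')

payload = {'name': 'test', 'email': 'test@example.com', 'image': img_b64, 'imagedetect': img_b64}
print(requests.post('http://localhost:5000/api/facedetect', json=payload).json())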
132
+
133
+
134
+ # @app.route('/api/chat/palm', methods=['POST'])
135
+ # def receive_message():
136
+ # message ={
137
+ # "message" : request.json['message']
138
+ # }
139
+ # response_palm = getResponse(message["message"])
140
+ # print("res palm =====", response_palm)
141
+ # return jsonify({'message': response_palm}), 200
142
+
143
+
144
+ @app.route('/add', methods=['POST'])
145
+ def Add():
146
+ name = request.json['name']
147
+ email = request.json['email']
148
+ mobile = request.json['mobile']
149
+ mobile_emergency = request.json['mobile_emergency']
150
+ age = request.json['age']
151
+ gender = request.json['gender']
152
+ location = request.json['location']
153
+ img64 = request.json['image']
154
+ user_disease = request.json['user_disease']
155
+ user_medicine = request.json['user_medicine']
156
+
157
+ img64_padded = add_padding(img64)
158
+ imageBinary = base64.b64decode(img64_padded)
159
+
160
+ cur = mysql.connection.cursor()
161
+ cur.execute("""
162
+ INSERT INTO users
163
+ (name, email ,mobile, mobile_emergency, age, gender, location, image)
164
+ VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
165
+ """, (name, email, mobile, mobile_emergency, age, gender, location, imageBinary))
166
+ mysql.connection.commit()
167
+ cur.execute("""
168
+ SELECT * FROM users
169
+ WHERE name=%s AND email=%s
170
+ """, (name, email))
171
+ data = cur.fetchall()
172
+ id = data[0][0]
173
+ for disease in user_disease:
174
+ print(disease)
175
+ cur.execute("""INSERT INTO user_disease
176
+ (user_id, disease_name)
177
+ VALUES (%s, %s)
178
+ """, (id, disease))
179
+ for medicine in user_medicine:
180
+ img64 = medicine['medicine_image']
181
+ img64_padded = add_padding(img64)
182
+ imageBinary = base64.b64decode(img64_padded)
183
+ cur.execute("""INSERT INTO user_medicine
184
+ (user_id, medicine_name,medicine_dose ,medicine_image)
185
+ VALUES (%s, %s, %s, %s)
186
+ """, (id, medicine['medicine_name'], medicine['medicine_dose'], medicine['medicine_image']))
187
+ mysql.connection.commit()
188
+ cur.close()
189
+ return jsonify({"message": "done"}), 200
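The inserts above imply a specific request shape: one row for the users table plus a list of disease names and a list of medicine objects. A sketch of the JSON body this route expects (all values are placeholders):

payload = {
    'name': 'test', 'email': 'test@example.com',
    'mobile': '0100000000', 'mobile_emergency': '0100000001',
    'age': 70, 'gender': 'male', 'location': 'Cairo',
    'image': '<base64 image>',
    'user_disease': ['diabetes'],
    'user_medicine': [{'medicine_name': 'aspirin', 'medicine_dose': '1x daily', 'medicine_image': '<base64 image>'}],
}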
+
+
+ @app.route('/delete/<string:email>', methods=['DELETE'])
+ def Delete(email):
+     cur = mysql.connection.cursor()
+     cur.execute("""
+         SELECT id FROM users
+         WHERE email=%s
+     """, (email, ))
+     data = cur.fetchall()
+     id = data[0][0]
+     cur.execute("DELETE FROM user_disease WHERE user_id=%s", (id,))
+     cur.execute("DELETE FROM user_medicine WHERE user_id=%s", (id,))
+     cur.execute("DELETE FROM users WHERE id=%s", (id,))
+     mysql.connection.commit()
+     return jsonify({"message": "done"}), 200
+
+
+ @app.route('/update/<int:id>', methods=['PUT'])
+ def Update(id):
+     name = request.json['name']
+     email = request.json['email']
+     mobile = request.json['mobile']
+     mobile_emergency = request.json['mobile_emergency']
+     age = request.json['age']
+     gender = request.json['gender']
+     location = request.json['location']
+     img64 = request.json['image']
+     user_disease = request.json['user_disease']
+     user_medicine = request.json['user_medicine']
+
+     img64_padded = add_padding(img64)
+     imageBinary = base64.b64decode(img64_padded)
+
+     cur = mysql.connection.cursor()
+     cur.execute("""
+         UPDATE users
+         SET name=%s, email=%s, mobile=%s, mobile_emergency=%s, age=%s, gender=%s, location=%s, image=%s
+         WHERE id=%s
+     """, (name, email, mobile, mobile_emergency, age, gender, location, imageBinary, id))
+     cur.execute("DELETE FROM user_disease WHERE user_id=%s", (id,))
+     cur.execute("DELETE FROM user_medicine WHERE user_id=%s", (id,))
+
+     for disease in user_disease:
+         cur.execute("""INSERT INTO user_disease
+             (user_id, disease_name)
+             VALUES (%s, %s)
+         """, (id, disease['disease_name']))
+     for medicine in user_medicine:
+         cur.execute("""INSERT INTO user_medicine
+             (user_id, medicine_name, medicine_image)
+             VALUES (%s, %s, %s)
+         """, (id, medicine['medicine_name'], medicine['medicine_image']))
+     mysql.connection.commit()
+     return jsonify({"message": "done"}), 200
+
+
+ @app.route('/select/<int:id>', methods=['GET'])
+ def Select(id):
+     cur = mysql.connection.cursor()
+     cur.execute("SELECT * FROM users WHERE id=%s", (id,))
+     table1 = cur.fetchall()
+     cur.execute("SELECT * FROM user_disease WHERE user_id=%s", (id,))
+     table2 = cur.fetchall()
+     cur.execute("SELECT * FROM user_medicine WHERE user_id=%s", (id,))
+     table3 = cur.fetchall()
+     cur.close()
+     user_data = {
+         "name": table1[0][1],
+         "email": table1[0][2],
+         "mobile": table1[0][3],
+         "mobile_emergency": table1[0][4],
+         "age": table1[0][5],
+         "gender": table1[0][6],
+         "location": table1[0][7],
+         "image": base64.b64encode(table1[0][8]).decode('utf-8'),
+         "user_disease": [{"disease_name": disease[1]} for disease in table2],
+         "user_medicine": [{"medicine_name": medicine[1], "medicine_image": base64.b64encode(medicine[3]).decode('utf-8')} for medicine in table3]
+     }
+     return jsonify(user_data), 200
+
+
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=5000, debug=True)
+     # init_connect()
+     # app.run(host='192.168.137.241', port=5000, debug=True)  # 192.168.1.114 http://192.168.137.241:5000 192.168.1.122
+
requirements.txt ADDED
File without changes