Spaces:
Runtime error
Runtime error
lakshmi324
committed on
Commit
•
e755da3
1
Parent(s):
1783472
Updated the app file for gradio changes
Browse files- app.py +103 -5
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import pandas as pd
|
2 |
|
3 |
from torchvision import transforms
|
@@ -7,7 +8,14 @@ from detecto.visualize import show_labeled_image
|
|
7 |
from detecto.core import Model
|
8 |
import matplotlib.pyplot as plt
|
9 |
import matplotlib.image as img
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
transform_img = transforms.Compose([transforms.ToPILImage(),
|
13 |
transforms.Resize(400),
|
@@ -17,14 +25,104 @@ transform_img = transforms.Compose([transforms.ToPILImage(),
|
|
17 |
|
18 |
|
19 |
labels = ['damage','BG']
|
20 |
-
model = Model.load('/
|
21 |
|
22 |
-
|
|
|
|
|
23 |
'''Function takes input of the damaged vehicle
|
24 |
and provides the damaged area of the vehicle
|
25 |
'''
|
26 |
-
image = utils.read_image(
|
27 |
new_image = transform_img(image)
|
28 |
labels, boxes, scores = model.predict(image)
|
29 |
top = len(scores[scores > .5])
|
30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
import pandas as pd
|
3 |
|
4 |
from torchvision import transforms
|
|
|
8 |
from detecto.core import Model
|
9 |
import matplotlib.pyplot as plt
|
10 |
import matplotlib.image as img
|
11 |
+
import matplotlib.pyplot as plt
|
12 |
+
import gradio as gr
|
13 |
+
import os
|
14 |
+
from typing import List, Optional
|
15 |
+
import matplotlib.pyplot as plt
|
16 |
+
import numpy as np
|
17 |
+
from matplotlib import patches
|
18 |
+
from pathlib import Path
|
19 |
|
20 |
transform_img = transforms.Compose([transforms.ToPILImage(),
|
21 |
transforms.Resize(400),
|
|
|
25 |
|
26 |
|
27 |
# Class labels the detector was trained on ('BG' = background class).
labels = ['damage','BG']
# Load the pretrained detecto model from the Space's root.
model = Model.load('/Trained_Model.pth', labels) # Change this while uploading
|
29 |
|
30 |
+
|
31 |
+
|
32 |
+
def prediction_defect(input_image, model=model):
    '''Function takes input of the damaged vehicle
    and provides the damaged area of the vehicle

    Args:
        input_image: file path of the uploaded vehicle image
            (gradio passes a filepath because the input component
            uses type="filepath").
        model: detecto Model used for detection; defaults to the
            module-level model loaded at import time.

    Returns:
        Path of the saved image with the detected damage regions
        drawn on it (whatever plot_bboxes returns).
    '''
    image = utils.read_image(input_image)
    # NOTE(review): the original also computed transform_img(image) into an
    # unused variable; prediction runs on the raw image, so that dead work
    # was removed.
    labels, boxes, scores = model.predict(image)
    # Keep only the detections the model is reasonably confident about.
    top = len(scores[scores > .5])

    # detecto returns boxes as [xmin, ymin, xmax, ymax], hence xywh=False.
    return plot_bboxes(input_image, bboxes=boxes[:top],
                       xywh=False, labels=labels[:top])
|
43 |
+
|
44 |
+
|
45 |
+
|
46 |
+
def plot_bboxes(
    image_file: str,
    bboxes: List[List[float]],
    xywh: bool = True,
    labels: Optional[List[str]] = None
) -> str:
    """
    Draw bounding boxes (and optional labels) on an image and save the result.

    Args:
        image_file: str specifying the image file path
        bboxes: list of bounding box annotations for all the detections
        xywh: bool, if True, the bounding box annotations are specified as
          [xmin, ymin, width, height]. If False the annotations are specified as
          [xmin, ymin, xmax, ymax]. If you are unsure what the mode is try both
          and check the saved image to see which setting gives the
          correct visualization.
        labels: optional list of label strings, one per bounding box.

    Returns:
        Path of the annotated image, saved as "image_bbox.jpg" in the same
        folder as the input image. (The original annotated ``-> None`` but
        the function has always returned this path.)
    """
    fig = plt.figure()

    # add axes to the image
    ax = fig.add_axes([0, 0, 1, 1])

    # Save the output next to the input so the caller can find it.
    image_folder = Path(image_file).parent

    # read and plot the image
    image = plt.imread(image_file)
    plt.imshow(image)

    # Iterate over all the bounding boxes
    for i, bbox in enumerate(bboxes):
        if xywh:
            xmin, ymin, w, h = bbox
        else:
            xmin, ymin, xmax, ymax = bbox
            w = xmax - xmin
            h = ymax - ymin

        # add bounding boxes to the image
        box = patches.Rectangle(
            (xmin, ymin), w, h, edgecolor="red", facecolor="none"
        )

        ax.add_patch(box)

        if labels is not None:
            # Anchor the label horizontally centered, near the top of the box.
            rx, ry = box.get_xy()
            cx = rx + box.get_width()/2.0
            cy = ry + box.get_height()/8.0
            l = ax.annotate(
                labels[i],
                (cx, cy),
                fontsize=8,
                fontweight="bold",
                color="white",
                ha='center',
                va='center'
            )
            # Red translucent background so the white text stays readable.
            l.set_bbox(
                dict(facecolor='red', alpha=0.5, edgecolor='red')
            )

    plt.axis('off')
    outfile = os.path.join(image_folder, "image_bbox.jpg")
    fig.savefig(outfile)

    print("Saved image with detections to %s" % outfile)
    return outfile
|
113 |
+
|
114 |
+
|
115 |
+
# Build the gradio UI: a single image upload in, the annotated image out,
# then launch the app (debug=True surfaces errors in the Space logs).
demo = gr.Interface(
    fn=prediction_defect,
    inputs=[gr.inputs.Image(type="filepath", label="Please Upload the Defect Image")],
    outputs=[gr.outputs.Image(type="pil")],
    examples=[],
)
demo.launch(debug=True)
|
120 |
+
|
121 |
+
|
122 |
+
|
123 |
+
|
124 |
+
|
125 |
+
|
126 |
+
|
127 |
+
|
128 |
+
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
pandas
|
2 |
detecto
|
3 |
-
torchvision
|
|
|
|
1 |
pandas
|
2 |
detecto
|
3 |
+
torchvision
|
4 |
+
gradio
|