Update app.py
Browse files
app.py
CHANGED
@@ -6,6 +6,7 @@ import numpy as np
|
|
6 |
from PIL import Image
|
7 |
import tensorflow as tf
|
8 |
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
|
|
|
9 |
|
10 |
feature_extractor = SegformerFeatureExtractor.from_pretrained(
|
11 |
"nvidia/segformer-b0-finetuned-cityscapes-512-1024"
|
@@ -102,19 +103,12 @@ def sepia(input_img):
|
|
102 |
return fig
|
103 |
|
104 |
custom_template = "my_custom_template.html"
|
105 |
-
|
106 |
-
def
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
explanation = f"The model predicts this image as class {prediction}."
|
112 |
-
|
113 |
-
# 시각화: 예측 설명 텍스트를 이미지에 표시
|
114 |
-
result_image = input_img.copy()
|
115 |
-
cv2.putText(result_image, explanation, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
|
116 |
-
|
117 |
-
return result_image
|
118 |
|
119 |
|
120 |
demo = gr.Interface(fn=sepia,
|
|
|
6 |
from PIL import Image
|
7 |
import tensorflow as tf
|
8 |
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
|
9 |
+
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
|
10 |
|
11 |
feature_extractor = SegformerFeatureExtractor.from_pretrained(
|
12 |
"nvidia/segformer-b0-finetuned-cityscapes-512-1024"
|
|
|
103 |
return fig
|
104 |
|
105 |
custom_template = "my_custom_template.html"
|
106 |
# ImageNet-pretrained ResNet50 classifier, loaded once at import time so
# repeated Gradio calls reuse the same weights.
model = ResNet50()

def classify_image(inp):
    """Classify an image with ResNet50 and return the top-1 ImageNet label.

    Parameters
    ----------
    inp : array-like image, shape (H, W, 3) or (1, H, W, 3)
        RGB image as delivered by a Gradio image input.

    Returns
    -------
    str
        Human-readable class name of the top-1 prediction.
    """
    inp = np.asarray(inp)
    # ResNet50 (include_top=True) requires a 4-D batch of 224x224 RGB
    # images; a raw Gradio image arrives as a single (H, W, 3) array of
    # arbitrary size, so add the batch axis and resize before predicting.
    if inp.ndim == 3:
        inp = inp[np.newaxis, ...]
    inp = tf.image.resize(inp, (224, 224)).numpy()
    inp = preprocess_input(inp)
    preds = model.predict(inp)
    # decode_predictions -> [[(wnid, class_name, score), ...]]; take the
    # class_name of the single top-1 entry.
    label = decode_predictions(preds, top=1)[0][0][1]
    return label
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
112 |
|
113 |
|
114 |
demo = gr.Interface(fn=sepia,
|