combining model and upload image code
app.py CHANGED
@@ -24,22 +24,15 @@ from image_posterior import create_gif
 def get_image_data(inp_image):
     """Gets the image data and model."""
     image = get_dataset_by_name(inp_image, get_label=False)
-    print("image returned\n", image)
+    # print("image returned\n", image)
     model_and_data = process_imagenet_get_model(image)
-    print("model returned\n", model_and_data)
+    # print("model returned\n", model_and_data)
     return image, model_and_data
 
 
 def segmentation_generation(image_name, c_width, n_top, n_gif_imgs):
-    print("Inputs Received:", image_name, c_width, n_top, n_gif_imgs)
+    print("Inputs Received:", image_name, c_width, n_top, n_gif_imgs)
 
-    get_image_data(image_name)
-
-    return "yeehaw"
-
-    # cred_width = c_width
-    # n_top_segs = n_top
-    # n_gif_images = n_gif_imgs
     image, model_and_data = get_image_data(image_name)
     # print("model_and_data", model_and_data)
     # Unpack datax
@@ -50,10 +43,10 @@ def segmentation_generation(image_name, c_width, n_top, n_gif_imgs):
     label = model_and_data["label"]
 
 
-    if (image_name == 'imagenet_diego'):
-        label = 156
-    elif (image_name == 'imagenet_french_bulldog'):
-        label = 245
+    # if (image_name == 'imagenet_diego'):
+    #     label = 156
+    # elif (image_name == 'imagenet_french_bulldog'):
+    #     label = 245
 
     # Unpack instance and segments
     instance = xtest[0]
@@ -66,8 +59,8 @@ def segmentation_generation(image_name, c_width, n_top, n_gif_imgs):
     xtrain = get_xtrain(segments)
 
     prediction = np.argmax(cur_model(xtrain[:1]), axis=1)
-    if image_name in ["imagenet_diego", "imagenet_french_bulldog"]:
-        assert prediction == label, f"Prediction is {prediction} not {label}"
+    # if image_name in ["imagenet_diego", "imagenet_french_bulldog"]:
+    #     assert prediction == label, f"Prediction is {prediction} not {label}"
 
     # Compute explanation
     exp_init = BayesLocalExplanations(training_data=xtrain,
@@ -85,16 +78,11 @@
     # Create the gif of the explanation
     return create_gif(rout['blr'], image_name, segments, instance, prediction[0], n_gif_imgs, n_top)
 
-def image_mod(image):
-    return image.rotate(45)
-
 if __name__ == "__main__":
     # gradio's image inputs look like this: <PIL.Image.Image image mode=RGB size=305x266 at 0x7F3D01C91FA0>
     # need to learn how to handle image inputs, or deal with file inputs or just file path strings
-
-
-    out = 'text'
-    # out = [gr.outputs.HTML(label="Output GIF"), gr.outputs.Textbox(label="Prediction")]
+    inp = gr.inputs.Image(label="Input Image (Or select an example)", type="pil")
+    out = [gr.outputs.HTML(label="Output GIF"), gr.outputs.Textbox(label="Prediction")]
 
     iface = gr.Interface(
         segmentation_generation,
@@ -105,7 +93,7 @@ if __name__ == "__main__":
            gr.inputs.Slider(minimum=10, maximum=50, step=1, default=20, label="n_gif_images", optional=False),
        ],
        outputs=out,
-       examples=[["imagenet_diego", 0.01, 7, 50],
-                 ["imagenet_french_bulldog", 0.05, 5, 50]]
+       examples=[["./imagenet_diego.png", 0.01, 7, 50],
+                 ["./imagenet_french_bulldog.jpg", 0.05, 5, 50]]
     )
     iface.launch(enable_queue=True)
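
The in-file comment about Gradio image inputs ("need to learn how to handle image inputs, or deal with file inputs or just file path strings") leaves open how uploaded images and the path-string examples should coexist. One way to accept both is sketched below, assuming uploads arrive as PIL images (matching the new type="pil" input) while example rows keep passing path strings; normalize_input is a hypothetical helper, not part of this commit:

import os
import tempfile

from PIL import Image


def normalize_input(inp):
    """Return a file path for either an example path string or an uploaded PIL image."""
    if isinstance(inp, str):
        # Example rows pass plain paths such as "./imagenet_diego.png".
        return inp
    if isinstance(inp, Image.Image):
        # Uploads arrive as PIL images; persist one to disk so path-based
        # loaders like get_dataset_by_name can stay unchanged.
        path = os.path.join(tempfile.gettempdir(), "gradio_upload.png")
        inp.save(path)
        return path
    raise TypeError(f"Unsupported input type: {type(inp)!r}")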
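
The first output component is gr.outputs.HTML, so whatever create_gif returns for that slot has to be an HTML string. A common pattern (shown here as a hypothetical sketch, not the actual create_gif implementation) is to inline the generated GIF as a base64 data URI:

import base64


def gif_to_html(gif_path, width=400):
    """Embed a GIF file as an inline <img> tag for an HTML output component."""
    with open(gif_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return f'<img src="data:image/gif;base64,{encoded}" width="{width}px">'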
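
Because outputs is the pair [HTML, Textbox], segmentation_generation is expected to return two values. A quick local smoke test under that assumption, and assuming the example images listed in examples sit next to app.py:

# Hypothetical smoke test; assumes segmentation_generation returns
# (html_gif, prediction_text) and that the example images are present.
from app import segmentation_generation

html_gif, prediction_text = segmentation_generation("./imagenet_diego.png", 0.01, 7, 50)
print(prediction_text)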