ThisIsM committed on
Commit 14126bd · 1 Parent(s): dd4b547

Deleted app.ipynb

app.ipynb DELETED
@@ -1,223 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/mahnaz/mlprojects/bloom_classifier/ven_bloom_gradio/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- }
- ],
- "source": [
- "import gradio as gr\n",
- "import json\n",
- "from transformers import pipeline\n",
- "from transformers import AutoImageProcessor\n",
- "from PIL import Image"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [],
- "source": [
- "from PIL import Image\n",
- "from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\n",
- "import numpy as np\n",
- "\n",
- "def preprocess_input(input_data, image_processor):\n",
- " \"\"\"\n",
- " Preprocesses the input image for inference.\n",
- "\n",
- " Parameters:\n",
- " input_data (str or np.ndarray): Path to the image file in .jpg format or a NumPy array.\n",
- " image_processor (AutoImageProcessor): An instance of AutoImageProcessor from the model's checkpoint.\n",
- "\n",
- " Returns:\n",
- " processed_img (torch.Tensor): Preprocessed image ready for inference.\n",
- " \"\"\"\n",
- " # Load the image based on the input type\n",
- " if isinstance(input_data, str):\n",
- " img = Image.open(input_data).convert('RGB')\n",
- " elif isinstance(input_data, np.ndarray):\n",
- " img = Image.fromarray(input_data.astype('uint8'), 'RGB')\n",
- " else:\n",
- " raise ValueError(\"Unsupported input type. Only str and np.ndarray are supported.\")\n",
- " \n",
- " # Obtain the mean and std from image_processor\n",
- " mean = image_processor.image_mean\n",
- " std = image_processor.image_std\n",
- " \n",
- " # Obtain the image size from image_processor\n",
- " size = (\n",
- " image_processor.size[\"shortest_edge\"]\n",
- " if \"shortest_edge\" in image_processor.size\n",
- " else (image_processor.size[\"height\"], image_processor.size[\"width\"])\n",
- " )\n",
- " \n",
- " # Define the transformations\n",
- " preprocess = Compose([\n",
- " Resize(size), # Resizing to the same size used during training\n",
- " CenterCrop(size), # Center cropping to the same size used during training\n",
- " ToTensor(),\n",
- " Normalize(mean=mean, std=std)\n",
- " ])\n",
- " \n",
- " # Apply the transformations\n",
- " processed_img = preprocess(img)\n",
- " \n",
- " # Add a batch dimension\n",
- " processed_img = processed_img.unsqueeze(0) # This is necessary because the model expects a batch\n",
- " to_pil = ToPILImage()\n",
- " processed_img = to_pil(processed_img)\n",
- "\n",
- " return processed_img\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [],
- "source": [
- "from PIL import Image\n",
- "from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\n",
- "\n",
- "def preprocess_input(image_path, image_processor):\n",
- " \"\"\"\n",
- " Preprocesses the input image for inference.\n",
- "\n",
- " Parameters:\n",
- " image_path (str): Path to the image file in .jpg format.\n",
- " image_processor (AutoImageProcessor): An instance of AutoImageProcessor from the model's checkpoint.\n",
- "\n",
- " Returns:\n",
- " processed_img (torch.Tensor): Preprocessed image ready for inference.\n",
- " \"\"\"\n",
- " # Load the image\n",
- " img = Image.open(image_path).convert('RGB')\n",
- " \n",
- " # Obtain the mean and std from image_processor\n",
- " mean = image_processor.image_mean\n",
- " std = image_processor.image_std\n",
- " \n",
- " # Obtain the image size from image_processor\n",
- " size = (\n",
- " image_processor.size[\"shortest_edge\"]\n",
- " if \"shortest_edge\" in image_processor.size\n",
- " else (image_processor.size[\"height\"], image_processor.size[\"width\"])\n",
- " )\n",
- " \n",
- " # Define the transformations\n",
- " preprocess = Compose([\n",
- " Resize(size), # Resizing to the same size used during training\n",
- " CenterCrop(size), # Center cropping to the same size used during training\n",
- " ToTensor(),\n",
- " Normalize(mean=mean, std=std)\n",
- " ])\n",
- " \n",
- " # Apply the transformations\n",
- " processed_img = preprocess(img)\n",
- " \n",
- " # Add a batch dimension\n",
- " processed_img = processed_img.unsqueeze(0) # This is necessary because the model expects a batch\n",
- "\n",
- " return processed_img\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/mahnaz/mlprojects/bloom_classifier/ven_bloom_gradio/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- }
- ],
- "source": [
- "import gradio as gr\n",
- "import json\n",
- "from transformers import pipeline\n",
- "\n",
- "\n",
- "def load_label_to_name_mapping(json_file_path):\n",
- "\"\"\"Load the label-to-name mapping from a JSON file.\"\"\"\n",
- "with open(json_file_path, 'r') as f:\n",
- "mapping = json.load(f)\n",
- "return {int(k): v for k, v in mapping.items()}\n",
- "\n",
- "def infer_flower_name(classifier, image):\n",
- "\"\"\"Perform inference on an image and return the flower name.\"\"\"\n",
- "# Perform inference\n",
- "# Load the model checkpoint for inference\n",
- " \n",
- "result = classifier(image)\n",
- "# Get the label from the inference result\n",
- "label = result[0]['label'].split('_')[-1] # The label is usually in the format 'LABEL_#'\n",
- "label = int(label)\n",
- " \n",
- "# Map the integer label to the flower name\n",
- "json_file_path = 'label_to_name.json'\n",
- "label_to_name = load_label_to_name_mapping(json_file_path)\n",
- "flower_name = label_to_name.get(label, \"Unknown\")\n",
- " \n",
- "return flower_name\n",
- "\n",
- "\n",
- "\n",
- "def predict(prompt_img):# would call a model to make a prediction on an input and return the output.\n",
- "\n",
- "# Instantiate the AutoImageProcessor\n",
- "#image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n",
- "\n",
- "# Preprocess the input image\n",
- "#image_path = 'path/to/your/image.jpg'\n",
- "#processed_img = preprocess_input(prompt_img, image_processor)\n",
- "processed_img= prompt_img \n",
- "classifier = pipeline(\"image-classification\", model=\"checkpoint-160\")\n",
- "flower_name = infer_flower_name(classifier, processed_img)\n",
- "return flower_name\n",
- "demo = gr.Interface(fn=predict, \n",
- "inputs=gr.Image(type=\"pil\"), \n",
- "outputs=gr.Label(num_top_classes=3),\n",
- "examples=[\"example.jpg\"])\n",
- "\n",
- "demo.launch()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "venv_bloom-classifier",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.3"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
- }
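
For reference, a minimal usage sketch of the deleted notebook's second preprocess_input (this assumes that definition is in scope and borrows the ViT checkpoint name from the commented-out line; note the first variant also calls ToPILImage() without importing it from torchvision.transforms):

from transformers import AutoImageProcessor

# Hypothetical usage: the checkpoint name and image path are illustrative only.
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
batch = preprocess_input("example.jpg", image_processor)
print(batch.shape)  # expected: a batched tensor, e.g. torch.Size([1, 3, H, W])
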
app.py CHANGED
@@ -26,25 +26,22 @@ def infer_flower_name(classifier, image):
 
     return flower_name
 
-def predict(flower):# would call a model to make a prediction on an input and return the output.
+def predict(flower): # would call a model to make a prediction on an input and return the output.
     classifier = pipeline("image-classification", model="checkpoint-160")
     flower_name = infer_flower_name(classifier, flower)
     return flower_name
 
-#def predict2(flower2): # output top 3 with prob?
-#    classifier = pipeline("image-classification", model="checkpoint-160")
-#    result = classifier(flower2)
-#    print(result)
-#    return result
 
 description = "Upload an image of a flower and discover its species!"
 title = "Bloom Classifier"
-examples = ["example.jpg", "image_00293.jpg","image_02828.jpg"]
+examples = ["examples/example.jpg", "examples/image_00293.jpg","examples/image_02828.jpg"]
 demo = gr.Interface(fn=predict,
                     inputs=gr.Image(type="pil"),
                     outputs=gr.Label(num_top_classes=3),
                     description=description,
                     title = title,
+                    live = False,
+                    share=True,
                     examples=examples)
 
 demo.launch()
example.jpg → examples/example.jpg RENAMED
File without changes
image_00293.jpg → examples/image_00293.jpg RENAMED
File without changes
image_02828.jpg → examples/image_02828.jpg RENAMED
File without changes
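
For reference, a minimal sketch of how the updated app.py is typically wired up (assuming the local checkpoint-160 directory and the examples/ folder introduced in this commit). Two things in this sketch differ from the diff and are assumptions, not the repo's method: share is normally an argument to launch() rather than to gr.Interface(), and the function below returns the pipeline's label/score mapping directly instead of going through infer_flower_name:

import gradio as gr
from transformers import pipeline

# Load the fine-tuned image-classification checkpoint once at startup.
classifier = pipeline("image-classification", model="checkpoint-160")

def predict(flower):
    # The pipeline accepts a PIL image and returns a list of {label, score} dicts,
    # which gr.Label(num_top_classes=3) can display directly as a confidence map.
    results = classifier(flower)
    return {r["label"]: r["score"] for r in results}

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=3),
    title="Bloom Classifier",
    description="Upload an image of a flower and discover its species!",
    examples=["examples/example.jpg", "examples/image_00293.jpg", "examples/image_02828.jpg"],
    live=False,
)

demo.launch(share=True)  # share belongs to launch(), not gr.Interface()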