ThisIsM committed on
Commit 7f0c8d2 · 1 Parent(s): 16fae5f

Add requirements.txt file.

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint-160/optimizer.pt filter=lfs diff=lfs merge=lfs -text
+ checkpoint-160/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ ven_bloom_gradio/
+ app.ipynb/
app.ipynb ADDED
@@ -0,0 +1,223 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/mahnaz/mlprojects/bloom_classifier/ven_bloom_gradio/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "source": [
+ "import gradio as gr\n",
+ "import json\n",
+ "from transformers import pipeline\n",
+ "from transformers import AutoImageProcessor\n",
+ "from PIL import Image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from PIL import Image\n",
+ "from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, ToPILImage\n",
+ "import numpy as np\n",
+ "\n",
+ "def preprocess_input(input_data, image_processor):\n",
+ " \"\"\"\n",
+ " Preprocesses the input image for inference.\n",
+ "\n",
+ " Parameters:\n",
+ " input_data (str or np.ndarray): Path to the image file in .jpg format or a NumPy array.\n",
+ " image_processor (AutoImageProcessor): An instance of AutoImageProcessor from the model's checkpoint.\n",
+ "\n",
+ " Returns:\n",
+ " processed_img (PIL.Image.Image): Preprocessed image ready for inference.\n",
+ " \"\"\"\n",
+ " # Load the image based on the input type\n",
+ " if isinstance(input_data, str):\n",
+ " img = Image.open(input_data).convert('RGB')\n",
+ " elif isinstance(input_data, np.ndarray):\n",
+ " img = Image.fromarray(input_data.astype('uint8'), 'RGB')\n",
+ " else:\n",
+ " raise ValueError(\"Unsupported input type. Only str and np.ndarray are supported.\")\n",
+ " \n",
+ " # Obtain the mean and std from image_processor\n",
+ " mean = image_processor.image_mean\n",
+ " std = image_processor.image_std\n",
+ " \n",
+ " # Obtain the image size from image_processor\n",
+ " size = (\n",
+ " image_processor.size[\"shortest_edge\"]\n",
+ " if \"shortest_edge\" in image_processor.size\n",
+ " else (image_processor.size[\"height\"], image_processor.size[\"width\"])\n",
+ " )\n",
+ " \n",
+ " # Define the transformations\n",
+ " preprocess = Compose([\n",
+ " Resize(size), # Resizing to the same size used during training\n",
+ " CenterCrop(size), # Center cropping to the same size used during training\n",
+ " ToTensor(),\n",
+ " Normalize(mean=mean, std=std)\n",
+ " ])\n",
+ " \n",
+ " # Apply the transformations\n",
+ " processed_img = preprocess(img)\n",
+ " \n",
+ " # Add a batch dimension\n",
+ " processed_img = processed_img.unsqueeze(0) # This is necessary because the model expects a batch\n",
+ " to_pil = ToPILImage()\n",
+ " processed_img = to_pil(processed_img.squeeze(0)) # ToPILImage expects a 3-D tensor, so drop the batch dimension again\n",
+ "\n",
+ " return processed_img\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from PIL import Image\n",
+ "from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\n",
+ "\n",
+ "def preprocess_input(image_path, image_processor):\n",
+ " \"\"\"\n",
+ " Preprocesses the input image for inference.\n",
+ "\n",
+ " Parameters:\n",
+ " image_path (str): Path to the image file in .jpg format.\n",
+ " image_processor (AutoImageProcessor): An instance of AutoImageProcessor from the model's checkpoint.\n",
+ "\n",
+ " Returns:\n",
+ " processed_img (torch.Tensor): Preprocessed image ready for inference.\n",
+ " \"\"\"\n",
+ " # Load the image\n",
+ " img = Image.open(image_path).convert('RGB')\n",
+ " \n",
+ " # Obtain the mean and std from image_processor\n",
+ " mean = image_processor.image_mean\n",
+ " std = image_processor.image_std\n",
+ " \n",
+ " # Obtain the image size from image_processor\n",
+ " size = (\n",
+ " image_processor.size[\"shortest_edge\"]\n",
+ " if \"shortest_edge\" in image_processor.size\n",
+ " else (image_processor.size[\"height\"], image_processor.size[\"width\"])\n",
+ " )\n",
+ " \n",
+ " # Define the transformations\n",
+ " preprocess = Compose([\n",
+ " Resize(size), # Resizing to the same size used during training\n",
+ " CenterCrop(size), # Center cropping to the same size used during training\n",
+ " ToTensor(),\n",
+ " Normalize(mean=mean, std=std)\n",
+ " ])\n",
+ " \n",
+ " # Apply the transformations\n",
+ " processed_img = preprocess(img)\n",
+ " \n",
+ " # Add a batch dimension\n",
+ " processed_img = processed_img.unsqueeze(0) # This is necessary because the model expects a batch\n",
+ "\n",
+ " return processed_img\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/mahnaz/mlprojects/bloom_classifier/ven_bloom_gradio/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "source": [
+ "import gradio as gr\n",
+ "import json\n",
+ "from transformers import pipeline\n",
+ "\n",
+ "\n",
+ "def load_label_to_name_mapping(json_file_path):\n",
+ " \"\"\"Load the label-to-name mapping from a JSON file.\"\"\"\n",
+ " with open(json_file_path, 'r') as f:\n",
+ " mapping = json.load(f)\n",
+ " return {int(k): v for k, v in mapping.items()}\n",
+ "\n",
+ "def infer_flower_name(classifier, image):\n",
+ " \"\"\"Perform inference on an image and return the flower name.\"\"\"\n",
+ " # Perform inference\n",
+ " # Load the model checkpoint for inference\n",
+ " \n",
+ " result = classifier(image)\n",
+ " # Get the label from the inference result\n",
+ " label = result[0]['label'].split('_')[-1] # The label is usually in the format 'LABEL_#'\n",
+ " label = int(label)\n",
+ " \n",
+ " # Map the integer label to the flower name\n",
+ " json_file_path = 'label_to_name.json'\n",
+ " label_to_name = load_label_to_name_mapping(json_file_path)\n",
+ " flower_name = label_to_name.get(label, \"Unknown\")\n",
+ " \n",
+ " return flower_name\n",
+ "\n",
+ "\n",
+ "\n",
+ "def predict(prompt_img):# would call a model to make a prediction on an input and return the output.\n",
+ "\n",
+ " # Instantiate the AutoImageProcessor\n",
+ " #image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n",
+ "\n",
+ " # Preprocess the input image\n",
+ " #image_path = 'path/to/your/image.jpg'\n",
+ " #processed_img = preprocess_input(prompt_img, image_processor)\n",
+ " processed_img = prompt_img\n",
+ " classifier = pipeline(\"image-classification\", model=\"checkpoint-160\")\n",
+ " flower_name = infer_flower_name(classifier, processed_img)\n",
+ " return flower_name\n",
+ "demo = gr.Interface(fn=predict, \n",
+ " inputs=gr.Image(type=\"pil\"), \n",
+ " outputs=gr.Label(num_top_classes=3),\n",
+ " examples=[\"example.jpg\"])\n",
+ "\n",
+ "demo.launch()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "venv_bloom-classifier",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.3"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
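Note: `predict` above returns a single flower-name string, while the interface output is `gr.Label(num_top_classes=3)`; `gr.Label` can also take a dict mapping class names to confidences, which is what makes the top-3 display meaningful, and the pipeline is cheaper to build once at startup than inside every call. A minimal sketch of that variant, reusing the repo's `label_to_name.json` and `checkpoint-160` (the helper name `predict_top3` is mine, not from the notebook):

```python
import json
from transformers import pipeline

# Load the integer-label -> flower-name mapping once, at startup.
with open("label_to_name.json") as f:
    label_to_name = {int(k): v for k, v in json.load(f).items()}

# Build the classification pipeline once instead of inside every predict() call.
classifier = pipeline("image-classification", model="checkpoint-160")

def predict_top3(image):
    """Return a {flower name: confidence} dict that gr.Label can render directly."""
    results = classifier(image, top_k=3)  # each item looks like {"label": "LABEL_53", "score": 0.97}
    return {
        label_to_name.get(int(r["label"].split("_")[-1]), "Unknown"): float(r["score"])
        for r in results
    }

# demo = gr.Interface(fn=predict_top3, inputs=gr.Image(type="pil"),
#                     outputs=gr.Label(num_top_classes=3), examples=["example.jpg"])
```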
checkpoint-160/config.json ADDED
@@ -0,0 +1,232 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19",
+ "20": "LABEL_20",
+ "21": "LABEL_21",
+ "22": "LABEL_22",
+ "23": "LABEL_23",
+ "24": "LABEL_24",
+ "25": "LABEL_25",
+ "26": "LABEL_26",
+ "27": "LABEL_27",
+ "28": "LABEL_28",
+ "29": "LABEL_29",
+ "30": "LABEL_30",
+ "31": "LABEL_31",
+ "32": "LABEL_32",
+ "33": "LABEL_33",
+ "34": "LABEL_34",
+ "35": "LABEL_35",
+ "36": "LABEL_36",
+ "37": "LABEL_37",
+ "38": "LABEL_38",
+ "39": "LABEL_39",
+ "40": "LABEL_40",
+ "41": "LABEL_41",
+ "42": "LABEL_42",
+ "43": "LABEL_43",
+ "44": "LABEL_44",
+ "45": "LABEL_45",
+ "46": "LABEL_46",
+ "47": "LABEL_47",
+ "48": "LABEL_48",
+ "49": "LABEL_49",
+ "50": "LABEL_50",
+ "51": "LABEL_51",
+ "52": "LABEL_52",
+ "53": "LABEL_53",
+ "54": "LABEL_54",
+ "55": "LABEL_55",
+ "56": "LABEL_56",
+ "57": "LABEL_57",
+ "58": "LABEL_58",
+ "59": "LABEL_59",
+ "60": "LABEL_60",
+ "61": "LABEL_61",
+ "62": "LABEL_62",
+ "63": "LABEL_63",
+ "64": "LABEL_64",
+ "65": "LABEL_65",
+ "66": "LABEL_66",
+ "67": "LABEL_67",
+ "68": "LABEL_68",
+ "69": "LABEL_69",
+ "70": "LABEL_70",
+ "71": "LABEL_71",
+ "72": "LABEL_72",
+ "73": "LABEL_73",
+ "74": "LABEL_74",
+ "75": "LABEL_75",
+ "76": "LABEL_76",
+ "77": "LABEL_77",
+ "78": "LABEL_78",
+ "79": "LABEL_79",
+ "80": "LABEL_80",
+ "81": "LABEL_81",
+ "82": "LABEL_82",
+ "83": "LABEL_83",
+ "84": "LABEL_84",
+ "85": "LABEL_85",
+ "86": "LABEL_86",
+ "87": "LABEL_87",
+ "88": "LABEL_88",
+ "89": "LABEL_89",
+ "90": "LABEL_90",
+ "91": "LABEL_91",
+ "92": "LABEL_92",
+ "93": "LABEL_93",
+ "94": "LABEL_94",
+ "95": "LABEL_95",
+ "96": "LABEL_96",
+ "97": "LABEL_97",
+ "98": "LABEL_98",
+ "99": "LABEL_99",
+ "100": "LABEL_100",
+ "101": "LABEL_101"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_100": 100,
+ "LABEL_101": 101,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_20": 20,
+ "LABEL_21": 21,
+ "LABEL_22": 22,
+ "LABEL_23": 23,
+ "LABEL_24": 24,
+ "LABEL_25": 25,
+ "LABEL_26": 26,
+ "LABEL_27": 27,
+ "LABEL_28": 28,
+ "LABEL_29": 29,
+ "LABEL_3": 3,
+ "LABEL_30": 30,
+ "LABEL_31": 31,
+ "LABEL_32": 32,
+ "LABEL_33": 33,
+ "LABEL_34": 34,
+ "LABEL_35": 35,
+ "LABEL_36": 36,
+ "LABEL_37": 37,
+ "LABEL_38": 38,
+ "LABEL_39": 39,
+ "LABEL_4": 4,
+ "LABEL_40": 40,
+ "LABEL_41": 41,
+ "LABEL_42": 42,
+ "LABEL_43": 43,
+ "LABEL_44": 44,
+ "LABEL_45": 45,
+ "LABEL_46": 46,
+ "LABEL_47": 47,
+ "LABEL_48": 48,
+ "LABEL_49": 49,
+ "LABEL_5": 5,
+ "LABEL_50": 50,
+ "LABEL_51": 51,
+ "LABEL_52": 52,
+ "LABEL_53": 53,
+ "LABEL_54": 54,
+ "LABEL_55": 55,
+ "LABEL_56": 56,
+ "LABEL_57": 57,
+ "LABEL_58": 58,
+ "LABEL_59": 59,
+ "LABEL_6": 6,
+ "LABEL_60": 60,
+ "LABEL_61": 61,
+ "LABEL_62": 62,
+ "LABEL_63": 63,
+ "LABEL_64": 64,
+ "LABEL_65": 65,
+ "LABEL_66": 66,
+ "LABEL_67": 67,
+ "LABEL_68": 68,
+ "LABEL_69": 69,
+ "LABEL_7": 7,
+ "LABEL_70": 70,
+ "LABEL_71": 71,
+ "LABEL_72": 72,
+ "LABEL_73": 73,
+ "LABEL_74": 74,
+ "LABEL_75": 75,
+ "LABEL_76": 76,
+ "LABEL_77": 77,
+ "LABEL_78": 78,
+ "LABEL_79": 79,
+ "LABEL_8": 8,
+ "LABEL_80": 80,
+ "LABEL_81": 81,
+ "LABEL_82": 82,
+ "LABEL_83": 83,
+ "LABEL_84": 84,
+ "LABEL_85": 85,
+ "LABEL_86": 86,
+ "LABEL_87": 87,
+ "LABEL_88": 88,
+ "LABEL_89": 89,
+ "LABEL_9": 9,
+ "LABEL_90": 90,
+ "LABEL_91": 91,
+ "LABEL_92": 92,
+ "LABEL_93": 93,
+ "LABEL_94": 94,
+ "LABEL_95": 95,
+ "LABEL_96": 96,
+ "LABEL_97": 97,
+ "LABEL_98": 98,
+ "LABEL_99": 99
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.31.0"
+ }
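The checkpoint config still carries the placeholder LABEL_0 … LABEL_101 class names rather than the flower names stored in label_to_name.json. A hedged sketch of one way to get human-readable pipeline output, assuming (as `infer_flower_name` in app.ipynb already does) that the integer in `LABEL_i` matches the keys of label_to_name.json:

```python
import json
from transformers import AutoImageProcessor, AutoModelForImageClassification, pipeline

with open("label_to_name.json") as f:
    names = {int(k): v for k, v in json.load(f).items()}

model = AutoModelForImageClassification.from_pretrained("checkpoint-160")
processor = AutoImageProcessor.from_pretrained("checkpoint-160")

# Swap the placeholder LABEL_i names for the real flower names before building the pipeline.
model.config.id2label = {i: names[i] for i in range(model.config.num_labels)}
model.config.label2id = {v: k for k, v in model.config.id2label.items()}

classifier = pipeline("image-classification", model=model, image_processor=processor)
# classifier("example.jpg") then returns e.g. [{"label": "sunflower", "score": ...}, ...]
```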
checkpoint-160/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:519cd64e2a8efa62e20ab1bda8b451a57f8df53139f98d2071b562d7d5f0d0eb
+ size 687133893
checkpoint-160/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
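This is the file that `AutoImageProcessor.from_pretrained("checkpoint-160")` reads, so the manual torchvision `Compose` in app.ipynb is not strictly needed: the processor already resizes to 224×224, rescales by 1/255, and normalizes with mean/std 0.5. A short usage sketch:

```python
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("checkpoint-160")
img = Image.open("example.jpg").convert("RGB")

# Returns a dict whose "pixel_values" entry is a (1, 3, 224, 224) float tensor, already normalized.
inputs = processor(images=img, return_tensors="pt")
print(inputs["pixel_values"].shape)
```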
checkpoint-160/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c21929ae35fb001478ab9819490f3471c32533c7d8942f9cdcf8e115f2c0dbe
+ size 343576301
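Both optimizer.pt and pytorch_model.bin are committed as Git LFS pointers, so a plain clone without LFS support yields these small text stubs rather than the ~687 MB and ~344 MB payloads. A hedged sketch of fetching the real files with huggingface_hub; the repo id below is a placeholder, not taken from this page:

```python
from huggingface_hub import snapshot_download

# Placeholder repo id -- substitute the actual "<user>/<repo>" (add repo_type="space" if this repo is a Space).
local_dir = snapshot_download(repo_id="user/bloom-classifier", allow_patterns=["checkpoint-160/*"])
print(local_dir)  # local path containing the real pytorch_model.bin rather than the LFS pointer
```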
checkpoint-160/rng_state.pth ADDED
Binary file (14.6 kB).
checkpoint-160/scheduler.pt ADDED
Binary file (627 Bytes).
checkpoint-160/trainer_state.json ADDED
@@ -0,0 +1,157 @@
+ {
+ "best_metric": 0.9323529411764706,
+ "best_model_checkpoint": "vit_fulldataset/checkpoint-160",
+ "epoch": 5.0,
+ "global_step": 160,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.31,
+ "learning_rate": 2.5e-05,
+ "loss": 4.6284,
+ "step": 10
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 5e-05,
+ "loss": 4.5942,
+ "step": 20
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 4.709302325581396e-05,
+ "loss": 4.5291,
+ "step": 30
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.27450980392156865,
+ "eval_loss": 4.447311878204346,
+ "eval_runtime": 21.4513,
+ "eval_samples_per_second": 47.55,
+ "eval_steps_per_second": 5.967,
+ "step": 32
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 4.418604651162791e-05,
+ "loss": 4.4148,
+ "step": 40
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 4.127906976744187e-05,
+ "loss": 4.3267,
+ "step": 50
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 3.837209302325582e-05,
+ "loss": 4.2447,
+ "step": 60
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.7598039215686274,
+ "eval_loss": 4.18092679977417,
+ "eval_runtime": 21.9573,
+ "eval_samples_per_second": 46.454,
+ "eval_steps_per_second": 5.829,
+ "step": 64
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 3.5465116279069774e-05,
+ "loss": 4.1564,
+ "step": 70
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.2558139534883724e-05,
+ "loss": 4.0352,
+ "step": 80
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 2.9651162790697678e-05,
+ "loss": 3.9991,
+ "step": 90
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.884313725490196,
+ "eval_loss": 3.975513219833374,
+ "eval_runtime": 22.2832,
+ "eval_samples_per_second": 45.774,
+ "eval_steps_per_second": 5.744,
+ "step": 96
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 2.674418604651163e-05,
+ "loss": 3.9439,
+ "step": 100
+ },
+ {
+ "epoch": 3.44,
+ "learning_rate": 2.3837209302325582e-05,
+ "loss": 3.8316,
+ "step": 110
+ },
+ {
+ "epoch": 3.75,
+ "learning_rate": 2.0930232558139536e-05,
+ "loss": 3.8242,
+ "step": 120
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9303921568627451,
+ "eval_loss": 3.8248324394226074,
+ "eval_runtime": 21.9121,
+ "eval_samples_per_second": 46.55,
+ "eval_steps_per_second": 5.842,
+ "step": 128
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 1.802325581395349e-05,
+ "loss": 3.7498,
+ "step": 130
+ },
+ {
+ "epoch": 4.38,
+ "learning_rate": 1.5116279069767441e-05,
+ "loss": 3.702,
+ "step": 140
+ },
+ {
+ "epoch": 4.69,
+ "learning_rate": 1.2209302325581395e-05,
+ "loss": 3.6716,
+ "step": 150
+ },
+ {
+ "epoch": 5.0,
+ "learning_rate": 9.302325581395349e-06,
+ "loss": 3.6785,
+ "step": 160
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9323529411764706,
+ "eval_loss": 3.7415974140167236,
+ "eval_runtime": 22.0515,
+ "eval_samples_per_second": 46.255,
+ "eval_steps_per_second": 5.805,
+ "step": 160
+ }
+ ],
+ "max_steps": 192,
+ "num_train_epochs": 6,
+ "total_flos": 3.955633604923392e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
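The trainer state shows eval accuracy climbing from about 0.275 after epoch 1 to 0.932 at step 160 (epoch 5 of a planned 6). A small sketch for pulling those evaluation entries back out of the log, e.g. to re-plot the curve:

```python
import json

with open("checkpoint-160/trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries from the Trainer's log history.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"epoch {e['epoch']:.1f}: accuracy={e['eval_accuracy']:.4f}, loss={e['eval_loss']:.4f}")
```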
checkpoint-160/training_args.bin ADDED
Binary file (3.96 kB).
example.jpg ADDED
flagged/log.csv ADDED
@@ -0,0 +1,2 @@
+ flower,output,flag,username,timestamp
+ ,/home/mahnaz/mlprojects/bloom_classifier/flagged/output/tmpeab6hesr.json,,,2023-09-05 10:48:54.077094
flagged/output/tmpeab6hesr.json ADDED
@@ -0,0 +1 @@
+ {}
image_00293.jpg ADDED
image_02828.jpg ADDED
label_to_name.json ADDED
@@ -0,0 +1,104 @@
+ {
+ "0": "pink primrose",
+ "1": "hard-leaved pocket orchid",
+ "2": "canterbury bells",
+ "3": "sweet pea",
+ "4": "english marigold",
+ "5": "tiger lily",
+ "6": "moon orchid",
+ "7": "bird of paradise",
+ "8": "monkshood",
+ "9": "globe thistle",
+ "10": "snapdragon",
+ "11": "colt's foot",
+ "12": "king protea",
+ "13": "spear thistle",
+ "14": "yellow iris",
+ "15": "globe-flower",
+ "16": "purple coneflower",
+ "17": "peruvian lily",
+ "18": "balloon flower",
+ "19": "giant white arum lily",
+ "20": "fire lily",
+ "21": "pincushion flower",
+ "22": "fritillary",
+ "23": "red ginger",
+ "24": "grape hyacinth",
+ "25": "corn poppy",
+ "26": "prince of wales feathers",
+ "27": "stemless gentian",
+ "28": "artichoke",
+ "29": "sweet william",
+ "30": "carnation",
+ "31": "garden phlox",
+ "32": "love in the mist",
+ "33": "mexican aster",
+ "34": "alpine sea holly",
+ "35": "ruby-lipped cattleya",
+ "36": "cape flower",
+ "37": "great masterwort",
+ "38": "siam tulip",
+ "39": "lenten rose",
+ "40": "barbeton daisy",
+ "41": "daffodil",
+ "42": "sword lily",
+ "43": "poinsettia",
+ "44": "bolero deep blue",
+ "45": "wallflower",
+ "46": "marigold",
+ "47": "buttercup",
+ "48": "oxeye daisy",
+ "49": "common dandelion",
+ "50": "petunia",
+ "51": "wild pansy",
+ "52": "primula",
+ "53": "sunflower",
+ "54": "pelargonium",
+ "55": "bishop of llandaff",
+ "56": "gaura",
+ "57": "geranium",
+ "58": "orange dahlia",
+ "59": "pink-yellow dahlia",
+ "60": "cautleya spicata",
+ "61": "japanese anemone",
+ "62": "black-eyed susan",
+ "63": "silverbush",
+ "64": "californian poppy",
+ "65": "osteospermum",
+ "66": "spring crocus",
+ "67": "bearded iris",
+ "68": "windflower",
+ "69": "tree poppy",
+ "70": "gazania",
+ "71": "azalea",
+ "72": "water lily",
+ "73": "rose",
+ "74": "thorn apple",
+ "75": "morning glory",
+ "76": "passion flower",
+ "77": "lotus lotus",
+ "78": "toad lily",
+ "79": "anthurium",
+ "80": "frangipani",
+ "81": "clematis",
+ "82": "hibiscus",
+ "83": "columbine",
+ "84": "desert-rose",
+ "85": "tree mallow",
+ "86": "magnolia",
+ "87": "cyclamen",
+ "88": "watercress",
+ "89": "canna lily",
+ "90": "hippeastrum",
+ "91": "bee balm",
+ "92": "ball moss",
+ "93": "foxglove",
+ "94": "bougainvillea",
+ "95": "camellia",
+ "96": "mallow",
+ "97": "mexican petunia",
+ "98": "bromelia",
+ "99": "blanket flower",
+ "100": "trumpet creeper",
+ "101": "blackberry lily"
+ }
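This mapping covers the same 102 classes (keys "0" through "101") as the LABEL_i entries in checkpoint-160/config.json, which is the correspondence `infer_flower_name` in app.ipynb relies on. A quick consistency check along those lines:

```python
import json

with open("label_to_name.json") as f:
    names = json.load(f)
with open("checkpoint-160/config.json") as f:
    config = json.load(f)

# Both files should describe the same 102 classes, keyed "0".."101".
assert len(names) == len(config["id2label"]) == 102
assert set(names) == set(config["id2label"])
print(names["53"])  # -> "sunflower"
```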
requirements.txt ADDED
File without changes
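As committed, requirements.txt is empty. Judging purely from the imports in app.ipynb, it would plausibly need at least the packages below; the only version hint in the repo is transformers 4.31.0 in the checkpoint config, and the rest are unpinned guesses, not taken from this commit:

```
gradio
transformers>=4.31.0
torch
torchvision
Pillow
numpy
```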