AmrElsayeh committed
Commit fdf135d (parent: df13236)

Upload 9 files

Files changed (10)
  1. .gitattributes +1 -0
  2. app.ipynb +416 -0
  3. app.py +22 -0
  4. classical.jpg +0 -0
  5. earthy.jpg +0 -0
  6. japandi.jpg +0 -0
  7. minimal.jpg +3 -0
  8. model.pkl +3 -0
  9. poho.jpg +0 -0
  10. requirements.txt +2 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ minimal.jpg filter=lfs diff=lfs merge=lfs -text
app.ipynb ADDED
@@ -0,0 +1,416 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "18acb717",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|default_exp app"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "44eb0ad3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "from fastai.vision.all import *\n",
+ "import gradio as gr\n",
+ "\n",
+ "def is_classical(x): return x[0].isupper()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d838c0b3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "path = untar_data(URLs.PETS)/'images'\n",
+ "\n",
+ "dls = ImageDataLoaders.from_name_func('.',\n",
+ " get_image_files(path), valid_pct=0.2, seed=42,\n",
+ " label_func=is_classical,\n",
+ " item_tfms=Resize(192, method='squish'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c107f724",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: left;\">\n",
+ " <th>epoch</th>\n",
+ " <th>train_loss</th>\n",
+ " <th>valid_loss</th>\n",
+ " <th>error_rate</th>\n",
+ " <th>time</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td>0</td>\n",
+ " <td>0.209574</td>\n",
+ " <td>0.081121</td>\n",
+ " <td>0.022327</td>\n",
+ " <td>00:24</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: left;\">\n",
+ " <th>epoch</th>\n",
+ " <th>train_loss</th>\n",
+ " <th>valid_loss</th>\n",
+ " <th>error_rate</th>\n",
+ " <th>time</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <td>0</td>\n",
+ " <td>0.090262</td>\n",
+ " <td>0.056602</td>\n",
+ " <td>0.017591</td>\n",
+ " <td>00:23</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>1</td>\n",
+ " <td>0.035389</td>\n",
+ " <td>0.037754</td>\n",
+ " <td>0.014208</td>\n",
+ " <td>00:22</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <td>2</td>\n",
+ " <td>0.013607</td>\n",
+ " <td>0.038817</td>\n",
+ " <td>0.012179</td>\n",
+ " <td>00:22</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "learn = vision_learner(dls, resnet18, metrics=error_rate)\n",
+ "learn.fine_tune(3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5171c7fc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "learn.export('model.pkl')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ae2bc6ac",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "learn = load_learner('model.pkl')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6e0bf9da",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "('False', TensorBase(0), TensorBase([9.9999e-01, 8.4523e-06]))"
+ ]
+ },
+ "execution_count": null,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "learn.predict(im)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0419ed3a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#|export\n",
+ "categories = ('classical','japandi','minimal','poho','earthy')\n",
+ "\n",
+ "def classify_image(img):\n",
+ " pred,idx,probs = learn.predict(img)\n",
+ " return dict(zip(categories, map(float,probs)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "762dec00",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "<style>\n",
+ " /* Turns off some styling */\n",
+ " progress {\n",
+ " /* gets rid of default border in Firefox and Opera. */\n",
+ " border: none;\n",
+ " /* Needs to be in here for Safari polyfill so background images work as expected. */\n",
+ " background-size: auto;\n",
+ " }\n",
+ " .progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {\n",
+ " background: #F44336;\n",
+ " }\n",
+ "</style>\n"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "{'Dog': 0.9999915361404419, 'Cat': 8.452258043689653e-06}"
+ ]
+ },
+ "execution_count": null,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "classify_image(im)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0518a30a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Running on local URL: http://127.0.0.1:7860/\n",
+ "\n",
+ "To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(<fastapi.applications.FastAPI at 0x7fa03ba47670>,\n",
+ " 'http://127.0.0.1:7860/',\n",
+ " None)"
+ ]
+ },
+ "execution_count": null,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "#|export\n",
+ "image = gr.inputs.Image(shape=(192, 192))\n",
+ "label = gr.outputs.Label()\n",
+ "examples = ['classical.jpg','japandi.jpg','minimal.jpg','poho.jpg','earthy.jpg']\n",
+ "\n",
+ "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)\n",
+ "intf.launch(inline=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0d1e90ce",
+ "metadata": {},
+ "source": [
+ "## end -"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "82774c08",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nbdev.export import notebook2script"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7a880da1",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Converted app.ipynb.\n"
+ ]
+ }
+ ],
+ "source": [
+ "notebook2script('app.ipynb')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1a349335",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
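
Note: the last exported cell uses Gradio's legacy `gr.inputs.Image(shape=...)` and `gr.outputs.Label()` constructors, which have since been removed from Gradio. The following is a minimal sketch of the same interface with the current top-level components; it assumes a recent Gradio release is installed and is not part of this commit.

# Sketch only: rebuilds the notebook's Gradio interface with current components
# instead of the removed gr.inputs / gr.outputs namespaces.
from fastai.vision.all import load_learner
import gradio as gr

def is_classical(x):
    # Must exist before unpickling: the exported learner references this label function.
    return x[0].isupper()

learn = load_learner('model.pkl')
categories = ('classical', 'japandi', 'minimal', 'poho', 'earthy')

def classify_image(img):
    pred, idx, probs = learn.predict(img)
    return dict(zip(categories, map(float, probs)))

demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),    # replaces gr.inputs.Image(shape=(192, 192))
    outputs=gr.Label(),   # replaces gr.outputs.Label()
    examples=['classical.jpg', 'japandi.jpg', 'minimal.jpg', 'poho.jpg', 'earthy.jpg'],
)
demo.launch()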
app.py ADDED
@@ -0,0 +1,22 @@
+ from fastai.vision.all import *
+ import gradio as gr
+
+ def is_classical(x): return x[0].isupper()
+
+ # Cell
+ learn = load_learner('model.pkl')
+
+ # Cell
+ categories = ('classical','japandi','minimal','poho','earthy')
+
+ def classify_image(img):
+     pred,idx,probs = learn.predict(img)
+     return dict(zip(categories, map(float,probs)))
+
+ # Cell
+ image = gr.inputs.Image(shape=(192, 192))
+ label = gr.outputs.Label()
+ examples = ['classical.jpg','japandi.jpg','minimal.jpg','poho.jpg','earthy.jpg']
+
+ intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
+ intf.launch()
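
The exported learner appears to have been trained with the binary `is_classical` label function (the notebook's `learn.predict` output above shows only two probabilities), so zipping against the five-element `categories` tuple keeps only the first two names. A quick local smoke test of `model.pkl` is sketched below; it assumes the files from this commit sit in the working directory and is not part of the commit itself.

# Sanity check of the exported learner outside Gradio (sketch only).
from fastai.vision.all import load_learner, PILImage

def is_classical(x):
    # Needed so the pickled learner can resolve its label_func on load.
    return x[0].isupper()

learn = load_learner('model.pkl')
img = PILImage.create('minimal.jpg')   # any of the bundled example images works
pred, idx, probs = learn.predict(img)

print(learn.dls.vocab)   # the classes the model actually predicts
print(pred, probs)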
classical.jpg ADDED
earthy.jpg ADDED
japandi.jpg ADDED
minimal.jpg ADDED

Git LFS Details

  • SHA256: 81779dcafa437f0cbb92c4e999f3f0af358b040dbca048b884e798c2243a6868
  • Pointer size: 132 Bytes
  • Size of remote file: 2.61 MB
model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7383370dedc48dec165f4fe7c9d3962ec00be847f65261b73c4e726912309592
+ size 102888099
poho.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ fastai
+