Curt Tigges committed on
Commit 2c8c2f8
1 Parent(s): 65a728c

added app file

Anime_Image_Label_Inference.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
anime_image_label_inference.py CHANGED
@@ -1,23 +1,8 @@
-# -*- coding: utf-8 -*-
-"""Anime Image Label Inference.ipynb
-
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1BxPfM2uV54LeiQGEk43xcyNwMgqwCR80
-"""
-
-!pip install -Uqq gradio
-
-!pip install -Uqq fastbook
+#import gradio as gr
 import fastbook
 fastbook.setup_book()
-
-import gradio as gr
 from fastbook import *
 
-path = Path('gdrive/MyDrive/anime-image-labeller/safebooru')
-
 """
 Get the prediction labels and their accuracies, then return the results as a dictionary.
 
@@ -28,7 +13,7 @@ Get the prediction labels and their accuracies, then return the results as a dic
 def get_pred_classes(obj, learn, thresh):
     labels = []
     # get list of classes from csv--replace
-    with open(path/'classes.txt', 'r') as f:
+    with open('classes.txt', 'r') as f:
         for line in f:
             labels.append(line.strip('\n'))
 
@@ -44,10 +29,10 @@ def get_pred_classes(obj, learn, thresh):
 
     return predictions
 
-def get_x(r): return path/'images'/r['img_name']
+def get_x(r): return 'images'/r['img_name']
 def get_y(r): return [t for t in r['tags'].split(' ') if t in pop_tags]
 
-learn = load_learner(path/'model-large-40e.pkl')
+learn = load_learner('model-large-40e.pkl')
 
 def predict_single_img(imf, thresh=0.2, learn=learn):
 
@@ -58,9 +43,11 @@ def predict_single_img(imf, thresh=0.2, learn=learn):
     img.show() #show image
     return str(get_pred_classes(pred_pct, learn, thresh))
 
-predict_single_img(path/'test/mask.jpeg')
+predict_single_img('test/mask.jpeg')
 
+"""
 iface = gr.Interface(fn=predict_single_img,
                      inputs=["image","number"],
                      outputs="text")
-iface.launch()
+iface.launch()
+"""
app.py ADDED
@@ -0,0 +1,51 @@
+import gradio as gr
+import fastbook
+fastbook.setup_book()
+from fastbook import *
+
+"""
+Get the prediction labels and their accuracies, then return the results as a dictionary.
+
+[obj] - tensor matrix containing the predicted accuracy given from the model
+[learn] - fastai learner needed to get the labels
+[thresh] - minimum accuracy threshold to returning results
+"""
+def get_pred_classes(obj, learn, thresh):
+    labels = []
+    # get list of classes from csv--replace
+    with open('classes.txt', 'r') as f:
+        for line in f:
+            labels.append(line.strip('\n'))
+
+    predictions = {}
+    x = 0
+    for item in obj:
+        acc = round(item.item(), 3)
+        if acc > thresh:
+            predictions[labels[x]] = round(acc, 3)
+        x += 1
+
+    predictions = sorted(predictions.items(), key=lambda x: x[1], reverse=True)
+
+    return predictions
+
+def get_x(r): return 'images'/r['img_name']
+def get_y(r): return [t for t in r['tags'].split(' ') if t in pop_tags]
+
+learn = load_learner(fname='model-large-40e.pkl')
+
+def predict_single_img(imf, thresh=0.2, learn=learn):
+
+    img = PILImage.create(imf)
+
+    #img.show() #show image
+    _, _, pred_pct = learn.predict(img) #predict while ignoring first 2 array inputs
+    img.show() #show image
+    return str(get_pred_classes(pred_pct, learn, thresh))
+
+#predict_single_img('test/mask.jpeg')
+
+iface = gr.Interface(fn=predict_single_img,
+                     inputs=["image","number"],
+                     outputs="text")
+iface.launch()
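One non-obvious detail in app.py: get_x and get_y are kept even though inference never calls them, which appears to be because load_learner unpickles the exported Learner, and any custom functions its DataBlock referenced must resolve by name in the loading module. A minimal sketch of that loading step, assuming fastai v2; the file names are taken from the diff, while the stand-in function bodies are not:

from fastai.vision.all import load_learner, PILImage

# Stand-in bodies: only the names have to exist for unpickling; the training-time
# versions built image paths and tag lists and are never called at inference.
def get_x(r): return r['img_name']
def get_y(r): return r['tags'].split(' ')

learn = load_learner(fname='model-large-40e.pkl')                 # same export app.py loads
_, _, probs = learn.predict(PILImage.create('test/mask.jpeg'))    # per-tag probabilities
print(len(probs), float(probs.max()))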