shukurullo2004 committed on
Commit
c7fc65b
·
verified ·
1 Parent(s): 37d1b34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -8
app.py CHANGED
@@ -1,9 +1,9 @@
 
1
  ### 1. Imports and class names setup ###
2
  import gradio as gr
3
  import os
4
  import torch
5
 
6
-
7
  from model import create_effnetb2_model
8
  from timeit import default_timer as timer
9
  from typing import Tuple, Dict
@@ -21,7 +21,7 @@ effnetb2, effnetb2_transforms = create_effnetb2_model(
21
  # Load saved weights
22
  effnetb2.load_state_dict(
23
  torch.load(
24
- f="food_model.pth",
25
  map_location=torch.device("cpu"), # load to CPU
26
  )
27
  )
@@ -55,22 +55,24 @@ def predict(img) -> Tuple[Dict, float]:
55
 
56
  ### 4. Gradio app ###
57
 
58
- # import gradio as gr
59
- # example_list = [["examples/" + example] for example in os.listdir("examples")]
60
  # Create title, description and article strings
61
- title = "Shukurullo FoodVision Mini 🍕🥩🍣"
62
  description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
63
- article = "Created by Shukurullo Meliboev at [09. PyTorch Model Deployment]"
 
 
 
64
 
65
  # Create the Gradio demo
66
  demo = gr.Interface(fn=predict, # mapping function from input to output
67
  inputs=gr.Image(type="pil"), # what are the inputs?
68
  outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
69
  gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
70
- # examples=example_list,
 
71
  title=title,
72
  description=description,
73
  article=article)
74
 
75
  # Launch the demo!
76
- demo.launch(debug=False)
 
1
+ %%writefile demos/foodvision_mini/app.py
2
  ### 1. Imports and class names setup ###
3
  import gradio as gr
4
  import os
5
  import torch
6
 
 
7
  from model import create_effnetb2_model
8
  from timeit import default_timer as timer
9
  from typing import Tuple, Dict
 
21
  # Load saved weights
22
  effnetb2.load_state_dict(
23
  torch.load(
24
+ f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
25
  map_location=torch.device("cpu"), # load to CPU
26
  )
27
  )
 
55
 
56
  ### 4. Gradio app ###
57
 
 
 
58
  # Create title, description and article strings
59
+ title = "FoodVision Mini 🍕🥩🍣"
60
  description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
61
+ article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
62
+
63
+ # Create examples list from "examples/" directory
64
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
65
 
66
  # Create the Gradio demo
67
  demo = gr.Interface(fn=predict, # mapping function from input to output
68
  inputs=gr.Image(type="pil"), # what are the inputs?
69
  outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
70
  gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
71
+ # Create examples list from "examples/" directory
72
+ examples=example_list,
73
  title=title,
74
  description=description,
75
  article=article)
76
 
77
  # Launch the demo!
78
+ demo.launch()