Chaitanya Garg committed
Commit cb1857e
Parent(s): a6019f9
completed model

Files changed:
- EffNetModel.pt +3 -0
- app.py +32 -0
- helper.py +36 -0
- model.py +18 -0
- predictor.py +24 -0
- requirements.txt +3 -0
EffNetModel.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55228d3b8996e17611d32e845e3fdbae61c00ee8d7a4ac008a413a1da7734329
size 31254650
app.py
ADDED
@@ -0,0 +1,32 @@
### Imports for Modules ###
import gradio as gr
import os
import torch
from typing import Tuple, Dict
from timeit import default_timer as timer

### Functional Imports
from predictor import predictionMaker

exampleList = [["examples/" + example] for example in os.listdir("examples")]

title = "Detecting Retinal Diseases for Early Prevention"
description = "An EfficientNetB2 feature extractor computer vision model to classify retinal OCT images into four classes: CNV, DME, Drusen and Normal"
article = "Created by [Eternal Bliassard](https://github.com/EternalBlissard)."

# Create the Gradio demo
demo = gr.Interface(fn=predictionMaker,
                    inputs=[gr.Image(type="pil")],
                    outputs=[gr.Label(num_top_classes=2, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],
                    examples=exampleList,
                    title=title,
                    description=description,
                    article=article)

# Launch the demo!
demo.launch()
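
One assumption worth noting: exampleList is built from an examples/ directory expected to sit next to app.py, so os.listdir("examples") raises FileNotFoundError if that folder is missing. A defensive variant (the guard is a suggestion, not part of the commit) could look like:

import os

# Fall back to no example gallery when the examples/ folder is absent.
exampleList = ([["examples/" + example] for example in os.listdir("examples")]
               if os.path.isdir("examples") else None)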
helper.py
ADDED
@@ -0,0 +1,36 @@
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import random
import zipfile
from pathlib import Path
import requests

def setAllSeeds(seed):
    os.environ['MY_GLOBAL_SEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def dataDownloader(src, dest):
    downloadPath = Path("downloadedData/")/dest

    if(downloadPath.is_dir()):
        print(f"{downloadPath} directory already exists, skipping downloading procedure")
    else:
        print(f"{downloadPath} directory doesn't already exist, starting downloading procedure")
        downloadPath.mkdir(parents=True, exist_ok=True)
        target = Path(src).name
        with open(Path("downloadedData/")/target, "wb") as f:
            requested = requests.get(src)
            print(f"Downloading {target} from {src}")
            f.write(requested.content)

        with zipfile.ZipFile(Path("downloadedData/")/target, "r") as zipRef:
            print("Unzipping the data")
            zipRef.extractall(downloadPath)
        os.remove(Path("downloadedData/")/target)
    return downloadPath
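
For reference, a minimal sketch of how these helpers might be used; the archive URL and destination name below are hypothetical placeholders, not part of the repository:

from helper import setAllSeeds, dataDownloader

setAllSeeds(42)  # seeds Python's random, NumPy and PyTorch (CPU and CUDA) in one call

# Hypothetical .zip of OCT images; dataDownloader fetches it once, extracts it under
# downloadedData/<dest>, deletes the archive and returns the extraction path.
dataPath = dataDownloader(src="https://example.com/octImages.zip", dest="octImages")
print(dataPath)  # downloadedData/octImages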
model.py
ADDED
@@ -0,0 +1,18 @@
import torch
import torchvision
from torch import nn
from helper import setAllSeeds

def getEffNetModel(seed, numClasses):
    setAllSeeds(seed)
    effNetWeights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    effNetTransforms = effNetWeights.transforms()
    effNet = torchvision.models.efficientnet_b2(weights=effNetWeights)
    for param in effNet.parameters():
        param.requires_grad = False
    effNet.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(1408, numClasses, bias=True)
    )
    return effNet, effNetTransforms
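
As a quick sanity check (a sketch, not part of the commit), the returned pair can be inspected like this; only the new classifier head carries trainable parameters:

from model import getEffNetModel

effNet, effNetTransforms = getEffNetModel(seed=42, numClasses=4)

# The backbone is frozen, so only the Linear(1408, 4) head is trainable:
# 1408 * 4 weights + 4 biases = 5,636 parameters.
trainableParams = sum(p.numel() for p in effNet.parameters() if p.requires_grad)
print(trainableParams)

# effNetTransforms is the resize/crop/normalise pipeline the pretrained
# EfficientNet-B2 weights expect; predictor.py applies it before inference.
print(effNetTransforms)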
predictor.py
ADDED
@@ -0,0 +1,24 @@
### Imports for Modules ###
import gradio as gr
import os
import torch
from typing import Tuple, Dict
from timeit import default_timer as timer

### Functional Imports
from model import getEffNetModel

classNames = ['CNV', 'DME', 'Drusen', 'Normal']
effNetModel, effNetTransforms = getEffNetModel(42, len(classNames))
effNetModel.load_state_dict(torch.load(f="EffNetModel.pt", map_location=torch.device("cpu")))

def predictionMaker(img):
    startTime = timer()
    img = effNetTransforms(img).unsqueeze(0)
    effNetModel.eval()
    with torch.inference_mode():
        predProbs = torch.softmax(effNetModel(img), dim=1)
    predDict = {classNames[i]: float(predProbs[0][i]) for i in range(len(classNames))}
    endTime = timer()
    predTime = round(endTime - startTime, 4)
    return predDict, predTime
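
A short example of calling predictionMaker directly, outside Gradio; the image path is a placeholder and the printed probabilities are purely illustrative:

from PIL import Image
from predictor import predictionMaker

img = Image.open("examples/sample.jpeg")   # hypothetical OCT image path
predDict, predTime = predictionMaker(img)

print(predDict)   # e.g. {'CNV': 0.91, 'DME': 0.05, 'Drusen': 0.03, 'Normal': 0.01}
print(predTime)   # wall-clock inference time in seconds, rounded to 4 decimals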
requirements.txt
ADDED
@@ -0,0 +1,3 @@
torch==2.2.0
torchvision==0.17.0
gradio==4.20.0
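
For local use, these pinned dependencies are typically installed with "pip install -r requirements.txt" before launching app.py.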