import gradio as gr
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import alexnet

LABELS = {0: 'Cat', 1: 'Dog'}

# Start from an ImageNet-pretrained AlexNet and freeze its feature extractor.
model = alexnet(pretrained=True)
for param in model.parameters():
    param.requires_grad = False

# Enlarge the pooled feature map to 7x7 so it matches the 256*7*7 input
# expected by the custom classifier below.
model.avgpool = nn.AdaptiveAvgPool2d((7, 7))

# Replace the classifier head so the network outputs two classes (cat vs dog).
model.classifier = nn.Sequential(
    nn.Linear(256 * 7 * 7, 1024),
    nn.Linear(1024, 256),
    nn.Linear(256, 2),
)

# Load the fine-tuned weights and run everything on CPU.
checkpoint = torch.load(
    "CatVsDogsModel.pth", map_location=torch.device("cpu")
)
model.load_state_dict(checkpoint["state_dict"])
model = model.to('cpu')
model.eval()  # switch to evaluation mode for inference

transform = transforms.Compose(
    [transforms.Resize((128, 128)), transforms.ToTensor()]
)
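
# Note: images are only resized to 128x128 and converted to tensors (no
# ImageNet normalization); this presumably mirrors how the checkpoint was trained.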

def predict(img):
    # Preprocess the PIL image and add a batch dimension.
    img = transform(img).to('cpu')
    img = img.unsqueeze(0)
    with torch.no_grad():
        out = model(img)
    # Convert logits to probabilities and return scores for both classes.
    probability = torch.nn.functional.softmax(out[0], dim=0)
    values, indices = torch.topk(probability, k=2)
    return {LABELS[i.item()]: v.item() for i, v in zip(indices, values)}
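
# Quick local sanity check (a sketch; "sample.jpg" is a hypothetical file that
# would need to exist next to this script):
#   from PIL import Image
#   print(predict(Image.open("sample.jpg").convert("RGB")))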

iface = gr.Interface(fn=predict, inputs=gr.Image(type='pil'), outputs="label")
iface.launch()