# CarModel / main.py
import gradio as gr
import torch
from torchvision import models, transforms
from PIL import Image  # needed for Image.fromarray() in classifyCar()
# -- get torch and cuda version
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
CUDA_VERSION = torch.__version__.split("+")[-1]
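# e.g. torch.__version__ == "1.10.0+cu113" yields TORCH_VERSION == "1.10" and CUDA_VERSION == "cu113"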
'''
# -- install pre-built detectron2 (Colab-style shell command)
!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/{CUDA_VERSION}/{TORCH_VERSION}/index.html
import detectron2
from detectron2.utils.logger import setup_logger
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
# -- set up the detectron2 logger
setup_logger()
# -- load rcnn model
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
!wget http://images.cocodataset.org/val2017/000000439715.jpg -q -O input.jpg
import cv2
from google.colab.patches import cv2_imshow  # Colab helper for displaying images
im = cv2.imread("./input.jpg")
cv2_imshow(im)
outputs = predictor(im)
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)
'''
# -- load the design-modernity classification model
DesignModernityModel = torch.load("DesignModernityModel.pt")
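# NOTE: if the checkpoint was saved on a GPU but this Space runs on CPU only (an assumption,
# not confirmed here), loading with an explicit map_location avoids a device-mismatch error:
# DesignModernityModel = torch.load("DesignModernityModel.pt", map_location=torch.device("cpu"))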
#INPUT_FEATURES = DesignModernityModel.fc.in_features
#linear = nn.Linear(INPUT_FEATURES, 5)  # would also require `import torch.nn as nn`
DesignModernityModel.eval()  # put the model in inference (eval) mode
LABELS = ['2000-2004', '2006-2008', '2009-2011', '2012-2015', '2016-2018']
carTransforms = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])  # resize and convert to tensor
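# If DesignModernityModel was fine-tuned from an ImageNet-pretrained backbone (an assumption,
# not confirmed here), the usual preprocessing would also crop and normalize the input, e.g.:
# carTransforms = transforms.Compose([
#     transforms.Resize(256),
#     transforms.CenterCrop(224),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# ])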
def classifyCar(im):
    im = Image.fromarray(im.astype('uint8'), 'RGB')
    im = carTransforms(im).unsqueeze(0)  # transform and add batch dimension
    with torch.no_grad():
        scores = torch.nn.functional.softmax(DesignModernityModel(im)[0], dim=0)
    return {LABELS[i]: float(scores[i]) for i in range(len(LABELS))}
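# Quick local sanity check (assumes "example_img.jpg" is uploaded to the repo and numpy is
# available, which Gradio already depends on):
# import numpy as np
# test_im = np.array(Image.open("example_img.jpg").convert("RGB"))
# print(classifyCar(test_im))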
examples = [["example_img.jpg"], ["example_img2.jpg"]]  # example images must be uploaded to the repo
# -- create the Gradio interface for the model
interface = gr.Interface(classifyCar, inputs='image', outputs='label', examples=examples, cache_examples=False, title='VW Up or Fiat 500')
interface.launch()