from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
from icevision.all import *
from icevision.models.checkpoint import *
import PIL
import torch
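# Fine-tuned raccoon-detection checkpoint stored alongside this script, and the
# EfficientDet definition it was trained with (tf_lite0 backbone, 384px input,
# 2 classes: background + raccoon).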
checkpoint_path = "efficientdetMapaches.pth"
model = models.ross.efficientdet.model(
    backbone=models.ross.efficientdet.backbones.tf_lite0(pretrained=True),
    num_classes=2,
    img_size=384,
)
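# Load the fine-tuned weights on CPU so the app also runs on machines without a GPU.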
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
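# Inference-time transforms: resize/pad to the 384px training size and normalize.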
infer_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(384), tfms.A.Normalize()])
# Define the function that performs the predictions.
def predict(img):
    # Convert the NumPy array provided by Gradio into a PIL image.
    img = PIL.Image.fromarray(img, "RGB")
    # Run end-to-end detection and return the image with the predicted boxes drawn on it.
    pred_dict = models.ross.efficientdet.end2end_detect(img, infer_tfms, model.to("cpu"),
                                                        class_map=ClassMap(['raccoon']),
                                                        detection_threshold=0.5)
    return pred_dict["img"]
# Create the Gradio interface and launch it.
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(128, 128)),
             outputs=[gr.outputs.Image(type="pil", label="EfficientDet Inference")],
             examples=['raccoon-161.jpg', 'raccoon-162.jpg']).launch(share=False)