from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
from icevision.all import *
from icevision.models.checkpoint import *
import PIL

# Build the EfficientDet (tf_lite0 backbone) raccoon detector and load the local checkpoint.
checkpoint_path = "efficientdetMapaches.pth"
model_type = models.ross.efficientdet
model = model_type.model(
    backbone=model_type.backbones.tf_lite0(pretrained=True),
    num_classes=2,  # background + raccoon
    img_size=384,
)
state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
model.load_state_dict(state_dict)

# Inference transforms: resize/pad to the training image size and normalize.
infer_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(384), tfms.A.Normalize()])

# Define the function that runs inference on an input image and returns the annotated result.
def predict(img):
    img = PIL.Image.fromarray(img, "RGB")
    pred_dict = model_type.end2end_detect(
        img,
        infer_tfms,
        model.to("cpu"),
        class_map=ClassMap(["raccoon"]),
        detection_threshold=0.5,
    )
    return pred_dict["img"]

# Create the interface and launch it.
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Image(shape=(128, 128)),
    outputs=[gr.outputs.Image(type="pil", label="EfficientDet Inference")],
    examples=["raccoon-161.jpg", "raccoon-162.jpg"],
).launch(share=False)
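
# --- Optional local smoke test (a sketch; assumes the example image raccoon-161.jpg ---
# --- ships next to this script). Uncomment to run the detector once without the UI. ---
# Note: launch() above blocks, so run this instead of (not after) the Gradio app.
#
# import numpy as np
# sample = np.array(PIL.Image.open("raccoon-161.jpg").convert("RGB"))
# annotated = predict(sample)          # PIL image with the predicted boxes drawn
# annotated.save("raccoon-161-pred.jpg")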