from fastai.vision.all import *  # provides PILImage (and re-exports torch and numpy)
from PIL import Image
import numpy as np
import torch
import torchvision.transforms as transforms
import gradio as gr
# Load the TorchScript U-Net (the fastai learner export is kept as a commented-out alternative)
# learn = load_learner('export.pkl')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.jit.load("unet.pth", map_location=device)
model = model.to(device)
model.eval()
# Class labels of the model (only needed for the commented-out learner path)
# labels = learn.dls.vocab
def transform_image(image):
    # Convert a PIL image into a normalized (ImageNet statistics) single-image batch
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    return my_transforms(image).unsqueeze(0).to(device)
# Define the function that performs the predictions
def predict(img):
    # Resize to the resolution the model expects, normalize, and run inference
    img = PILImage.create(img)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    with torch.no_grad():
        outputs = model(tensor)
        outputs = torch.argmax(outputs, 1)
    # Map each predicted class index to a grayscale value for visualization
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 255  # grape
    mask[mask == 1] = 150  # leaves
    mask[mask == 2] = 76   # pole
    mask[mask == 3] = 29   # wood
    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
    # Alternative using the fastai learner:
    # pred, pred_idx, probs = learn.predict(img)
    # return {labels[i]: float(probs[i]) for i in range(len(labels))}
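# Optional local smoke test (assumes the bundled example image 'color_154.jpg'
# sits next to this script). Uncomment to run one prediction without the Gradio UI
# and save the resulting mask to disk for inspection.
# mask_img = predict("color_154.jpg")
# mask_img.save("mask_154.png")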
# Create the Gradio interface and launch it (predict handles resizing internally)
gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(),
    examples=['color_154.jpg', 'color_155.jpg'],
).launch(share=False)