from fastai.vision.all import *
import gradio as gr
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
from huggingface_hub import from_pretrained_fastai

# Load the learner from the Hugging Face Hub and pull out its underlying
# PyTorch model for raw-tensor inference.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
repo_id = "macapa/segmentation-mod"
learner = from_pretrained_fastai(repo_id)
model = learner.model.to(device)
model.eval()
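
# Optional shape sanity check (a sketch, left commented out; assumes the
# model takes 3-channel 480x640 inputs, matching the resize applied in
# transform_image below):
#
#   dummy = torch.zeros(1, 3, 480, 640, device=device)
#   with torch.no_grad():
#       print(model(dummy).shape)  # expected: (1, num_classes, 480, 640)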


def transform_image(image):
  """Segment an input RGB image (numpy array) and return a grayscale
  PIL image in which each predicted class maps to a fixed intensity."""
  # Resize to the 480x640 resolution the model expects, then normalize
  # with ImageNet statistics.
  my_transforms = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          [0.485, 0.456, 0.406],
                                          [0.229, 0.224, 0.225])])
  image = transforms.Resize((480, 640))(Image.fromarray(image))
  tensor = my_transforms(image).unsqueeze(0).to(device)

  with torch.no_grad():
    outputs = model(tensor)

  # Per-pixel class index, then remap each class index to a grayscale value.
  outputs = torch.argmax(outputs, 1)
  mask = np.array(outputs.cpu())
  mask[mask == 0] = 255
  mask[mask == 1] = 150
  mask[mask == 2] = 76
  mask[mask == 3] = 25
  mask[mask == 4] = 0

  mask = np.reshape(mask, (480, 640))
  return Image.fromarray(mask.astype('uint8'))
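
# Minimal local test of transform_image (a sketch, left commented out;
# assumes one of the example images below, e.g. 'color_156.jpg', sits next
# to this script):
#
#   arr = np.array(Image.open("color_156.jpg").convert("RGB"))
#   out = transform_image(arr)  # grayscale PIL image
#   out.save("mask_156.png")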

    
# Build the interface and launch it.
gr.Interface(fn=transform_image,
             inputs=gr.Image(),
             outputs=gr.Image(type="pil"),
             examples=['color_156.jpg', 'color_179.jpg']).launch(share=False)