import numpy as np
import torch
import torch.utils.data
from PIL import Image, ImageDraw
from torchvision import transforms

from cvat_preprocessor import CVATPreprocessor

DEBUG = True

class DTSegmentationDataset(torch.utils.data.Dataset):
    """
    Dataset for the Duckietown semantic segmentation data.
    Loads the images and builds the corresponding segmentation targets.
    """
    PATH_TO_ANNOTATIONS = "offline learning/semantic segmentation/data/annotations/"
    PATH_TO_IMAGES = "offline learning/semantic segmentation/data/frames/"
    CVAT_XML_FILENAME = "segmentation_annotation.xml"
    # Class ids and the RGB colours used for visualisation
    # (see label_img_to_rgb below).
    SEGM_LABELS = {
        'Background': {'id': 0, 'rgb_value': [0, 0, 0]},
        'Ego Lane': {'id': 1, 'rgb_value': [102, 255, 102]},
        'Opposite Lane': {'id': 2, 'rgb_value': [245, 147, 49]},
        'Obstacle': {'id': 3, 'rgb_value': [184, 61, 245]},
        'Road End': {'id': 4, 'rgb_value': [250, 50, 83]},
        'Intersection': {'id': 5, 'rgb_value': [50, 183, 250]},
        'Middle Lane': {'id': 6, 'rgb_value': [255, 255, 0]},
        'Side Lane': {'id': 7, 'rgb_value': [255, 255, 255]},
    }

    def __init__(self):
        super().__init__()
        # Collect the names of all annotated frames from the CVAT XML file.
        self.imgs = CVATPreprocessor.get_all_image_names(self.PATH_TO_ANNOTATIONS + self.CVAT_XML_FILENAME)

    def __getitem__(self, idx):
        image_name = self.imgs[idx]
        if DEBUG:
            print(f"Fetching image {image_name}")

        img = Image.open(self.PATH_TO_IMAGES + image_name).convert("RGB")

        # Dictionary mapping each label name to the list of polygons
        # annotated for it in the CVAT XML file.
        all_polygons = CVATPreprocessor.get_all_image_polygons(image_name, self.PATH_TO_ANNOTATIONS + self.CVAT_XML_FILENAME)

        # Target layout is (H, W) = (640, 480), matching the Resize calls
        # below (torchvision's Resize takes (height, width)).
        target = np.zeros((640, 480), dtype=np.int64)

        # Sample one rotation angle per item (degrees in [-10, 10)) and apply
        # it to both the image and every label mask so they stay aligned.
        random_angle = np.random.randint(-10, 10)

        for label, polygons in all_polygons.items():
            # Rasterise all polygons of this label into a binary mask.
            mask = Image.new('L', img.size, 0)
            drawer = ImageDraw.Draw(mask)
            for polygon in polygons:
                drawer.polygon(polygon, outline=255, fill=255)

            # Resize with nearest-neighbour interpolation so the mask stays
            # strictly binary, then rotate by the shared random angle.
            mask = transforms.Resize((640, 480), interpolation=transforms.InterpolationMode.NEAREST)(mask)
            mask = transforms.functional.rotate(mask, random_angle)

            mask = np.array(mask) == 255
            if DEBUG:
                print(f"Label '{label}' has {np.sum(mask)} pixels. Assigning them the value {self.SEGM_LABELS[label]['id']}")

            # Merge 'Ego Lane', 'Opposite Lane' and 'Intersection' into a
            # single road class; all other labels keep their own id.
            if label in ['Ego Lane', 'Opposite Lane', 'Intersection']:
                target[mask] = self.SEGM_LABELS['Ego Lane']['id']
            else:
                target[mask] = self.SEGM_LABELS[label]['id']

        # Apply the image-side transforms, then the same rotation as the masks.
        img = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((640, 480)),
            transforms.ColorJitter(brightness=0.7, contrast=0.6, saturation=0.2),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])(img)
        img = transforms.functional.rotate(img, random_angle)

        target = torch.from_numpy(target)
        return img, target

    def __len__(self):
        return len(self.imgs)

    @staticmethod
    def label_img_to_rgb(label_img):
        """
        Converts a label image (one class id per pixel) to an RGB image
        using the colours defined in SEGM_LABELS.
        """
        rgb_img = np.zeros((label_img.shape[0], label_img.shape[1], 3), dtype=np.uint8)
        for label, label_info in DTSegmentationDataset.SEGM_LABELS.items():
            mask = label_img == label_info['id']
            rgb_img[mask] = label_info['rgb_value']
        return rgb_img
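

# A minimal usage sketch (an assumption, not part of the original project):
# how this dataset might be split and wrapped in DataLoaders for training.
# The function name, split fraction, batch size, and seed are illustrative.
def make_dataloaders(batch_size=4, val_fraction=0.2, seed=42):
    dataset = DTSegmentationDataset()
    val_size = int(len(dataset) * val_fraction)
    train_set, val_set = torch.utils.data.random_split(
        dataset, [len(dataset) - val_size, val_size],
        generator=torch.Generator().manual_seed(seed))
    # Shuffle only the training split; images and targets come paired from
    # __getitem__, so the default collate function stacks them correctly.
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False)
    return train_loader, val_loader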


if __name__ == "__main__":
    if DEBUG:
        dataset = DTSegmentationDataset()
        image, target = dataset[0]
        # Note: the image tensor is normalised, so the preview colours are skewed.
        transforms.ToPILImage()(image).show()
        transforms.ToPILImage()(DTSegmentationDataset.label_img_to_rgb(target.numpy())).show()
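        # Hypothetical extra sanity check (not in the original script): report
        # the dataset size and which class ids actually occur in this target.
        print(f"Dataset size: {len(dataset)}")
        print(f"Class ids in target: {torch.unique(target).tolist()}")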