Upload 2 files
- cvat_preprocessor.py +43 -0
- dataloader.py +115 -0
cvat_preprocessor.py
ADDED
@@ -0,0 +1,43 @@
import xml.etree.ElementTree as ET


class CVATPreprocessor:
    """Helper class to preprocess annotations in `CVAT for images 1.1` XML-encoded format."""

    @staticmethod
    def get_all_image_names(annotation_path):
        """Returns a list of all image names present in the annotation file."""
        annotations = ET.parse(annotation_path).getroot()
        images = annotations.findall("image")
        return [image.attrib["name"] for image in images]

    @staticmethod
    def get_all_image_polygons(image_name, annotation_path):
        """
        Returns a dictionary of all polygons for the given image name.
        The key is the label and the value is a list of polygons
        (each a list of points) associated with that label.
        """
        annotations = ET.parse(annotation_path).getroot()
        image = annotations.find(f"image[@name='{image_name}']")
        raw_polygons = image.findall("polygon")

        # Extract the label and the raw points for each polygon,
        # parse the points to (x, y) and group the polygons by label
        processed_polygons = {}
        for raw_polygon in raw_polygons:
            label, points = raw_polygon.attrib["label"], raw_polygon.attrib["points"].split(";")
            # Parse the points to (x, y) int pairs
            points = [(int(float(point.split(",")[0])), int(float(point.split(",")[1]))) for point in points]
            processed_polygons[label] = processed_polygons.get(label, []) + [points]

        return processed_polygons


if __name__ == "__main__":
    # Example usage
    PATH_TO_ANNOTATIONS = "offline learning/semantic segmentation/data/annotations/"
    PATH_TO_IMAGES = "offline learning/semantic segmentation/data/frames/"
    CVAT_XML_FILENAME = "segmentation_annotation.xml"
    imgs = CVATPreprocessor.get_all_image_names(PATH_TO_ANNOTATIONS + CVAT_XML_FILENAME)
    polygons = CVATPreprocessor.get_all_image_polygons(imgs[0], PATH_TO_ANNOTATIONS + CVAT_XML_FILENAME)
    print(f"Loaded {len(imgs)} images from {PATH_TO_ANNOTATIONS + CVAT_XML_FILENAME}")
    print(f"Image '{imgs[0]}' has {len(polygons)} polygon categories")
dataloader.py
ADDED
@@ -0,0 +1,115 @@
import numpy as np
import torch
import torch.utils.data
from PIL import Image, ImageDraw
from torchvision import transforms

from cvat_preprocessor import CVATPreprocessor

DEBUG = True


class DTSegmentationDataset(torch.utils.data.Dataset):
    """
    Dataset for the Duckietown semantic segmentation data.
    Loads the images and the corresponding segmentation targets.
    """
    PATH_TO_ANNOTATIONS = "offline learning/semantic segmentation/data/annotations/"
    PATH_TO_IMAGES = "offline learning/semantic segmentation/data/frames/"
    CVAT_XML_FILENAME = "segmentation_annotation.xml"
    SEGM_LABELS = {
        'Background': {'id': 0, 'rgb_value': [0, 0, 0]},           # black
        'Ego Lane': {'id': 1, 'rgb_value': [102, 255, 102]},       # green
        'Opposite Lane': {'id': 2, 'rgb_value': [245, 147, 49]},   # orange
        'Obstacle': {'id': 3, 'rgb_value': [184, 61, 245]},        # purple
        'Road End': {'id': 4, 'rgb_value': [250, 50, 83]},         # red
        'Intersection': {'id': 5, 'rgb_value': [50, 183, 250]},    # blue
        'Middle Lane': {'id': 6, 'rgb_value': [255, 255, 0]},      # yellow
        'Side Lane': {'id': 7, 'rgb_value': [255, 255, 255]},      # white
    }

    def __init__(self):
        super().__init__()
        # Store the list of all image names
        self.imgs = CVATPreprocessor.get_all_image_names(self.PATH_TO_ANNOTATIONS + self.CVAT_XML_FILENAME)

    def __getitem__(self, idx):
        image_name = self.imgs[idx]
        if DEBUG:
            print(f"Fetching image {image_name}")
        # Load the image
        img = Image.open(self.PATH_TO_IMAGES + image_name).convert("RGB")

        # Load the associated segmentation annotation (polygons grouped by label)
        all_polygons = CVATPreprocessor.get_all_image_polygons(image_name, self.PATH_TO_ANNOTATIONS + self.CVAT_XML_FILENAME)

        # Create a single-channel target with the same spatial dimensions as the
        # resized image, where each pixel holds the class id of its label
        target = np.zeros((640, 480)).astype(np.longlong)

        # Generate the random rotation angle only once so that the image
        # and the masks are rotated identically
        random_angle = np.random.randint(-10, 10)

        # Rasterize the polygons of each label and write its class id into the target
        for label, polygons in all_polygons.items():
            # Create an empty bitmask for the current label and draw all label-associated polygons on it
            mask = Image.new('L', img.size, 0)
            drawer = ImageDraw.Draw(mask)
            for polygon in polygons:
                drawer.polygon(polygon, outline=255, fill=255)
            # Show the mask for extra debugging
            # mask.show()

            # Resize the mask to the target size; nearest-neighbour interpolation
            # keeps the mask strictly binary, so the `== 255` check below stays exact
            mask = transforms.Resize((640, 480), interpolation=transforms.InterpolationMode.NEAREST)(mask)
            # Rotate the mask by the shared angle
            mask = transforms.functional.rotate(mask, random_angle)

            mask = np.array(mask) == 255
            if DEBUG:
                print(f"Label '{label}' has {np.sum(mask)} pixels. Assigning them a value {self.SEGM_LABELS[label]['id']}")

            # Merge the three road classes into one to improve the performance of the model
            if label in ['Ego Lane', 'Opposite Lane', 'Intersection']:
                target[mask] = self.SEGM_LABELS['Ego Lane']['id']
            else:
                target[mask] = self.SEGM_LABELS[label]['id']

        img = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize((640, 480)),
            transforms.ColorJitter(brightness=0.7, contrast=0.6, saturation=0.2),
            # Normalize the image with the mean and standard deviation of the ImageNet dataset
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])(img)
        img = transforms.functional.rotate(img, random_angle)

        target = torch.from_numpy(target)

        return img, target

    def __len__(self):
        return len(self.imgs)

    @staticmethod
    def label_img_to_rgb(label_img):
        """
        Converts a label image (one class id per pixel) to an RGB image.
        """
        rgb_img = np.zeros((label_img.shape[0], label_img.shape[1], 3), dtype=np.uint8)
        for label, label_info in DTSegmentationDataset.SEGM_LABELS.items():
            mask = label_img == label_info['id']
            rgb_img[mask] = label_info['rgb_value']
        return rgb_img


# ---------------------
# Select a sample image and its mask from the dataset
# and visualize them to check that the dataset works correctly

if __name__ == "__main__":
    if DEBUG:
        dataset = DTSegmentationDataset()
        image, target = dataset[0]
        transforms.ToPILImage()(image).show()
        transforms.ToPILImage()(DTSegmentationDataset.label_img_to_rgb(target)).show()
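
Since DTSegmentationDataset is a standard torch.utils.data.Dataset, it plugs directly into a DataLoader for batched training. A minimal sketch; the batch size, shuffling, and the loss function named in the comments are illustrative choices, not part of this upload:

import torch.utils.data

from dataloader import DTSegmentationDataset

dataset = DTSegmentationDataset()
loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)

for images, targets in loader:
    # images:  float tensor of shape (4, 3, 640, 480), jittered, normalized, rotated
    # targets: long tensor of shape (4, 640, 480) of per-pixel class ids,
    #          the layout expected by e.g. torch.nn.CrossEntropyLoss
    print(images.shape, targets.shape)
    break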