# segmentation_dataset.py
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from scipy.stats import mode
from torch.utils.data import Dataset

class SegmentationDataset(Dataset):
    """Pascal VOC segmentation dataset that yields (image, mask, image_path) triples."""

    def __init__(self, dataset_dir, transform=None, img_size=(24, 24)):
        self.img_size = img_size
        self.transform = transform
        # Read the image IDs of the validation split.
        with open(os.path.join(dataset_dir, "ImageSets", "Segmentation", "val.txt")) as file:
            image_ids = [line.rstrip() for line in file]
        self.image_paths = [os.path.join(dataset_dir, "JPEGImages", image_id + ".jpg") for image_id in image_ids]
        self.annotation_paths = [os.path.join(dataset_dir, "SegmentationClass", image_id + ".png") for image_id in image_ids]

    def __len__(self):
        # Return the number of samples in the dataset.
        return len(self.image_paths)

    def __getitem__(self, idx):
        # Return one sample: (transformed image, preprocessed mask, image path).
        path = self.image_paths[idx]
        try:
            image = self.transform(Image.open(self.image_paths[idx]))
        except Exception:
            # Fall back to a placeholder tensor if the image cannot be loaded or transformed.
            image = torch.tensor([1, 1, 1])
        try:
            annotation = self.preprocess_VOC_mask(self.annotation_paths[idx])
        except Exception:
            # Fall back to a placeholder tensor if the mask cannot be loaded or preprocessed.
            annotation = torch.tensor([1, 1, 1])
        return image, annotation, path

    def create_color_mapping(self, unique_values):
        # Build one RGB color per unique label value, with hues evenly spaced around the HSV wheel.
        num_values = len(unique_values)
        color_mapping = np.zeros((num_values, 3))
        # endpoint=False avoids hue 0.0 and hue 1.0 (the same color in HSV) both being used.
        hues = np.linspace(0, 1, num_values, endpoint=False)
        for i, hue in enumerate(hues):
            color_mapping[i] = plt.cm.hsv(hue)[:3]  # Convert the hue to an RGB triple.
        return color_mapping

    def get_masked_image(self, image, annotation):
        # Overlay the colorized class mask on top of a CHW image for visualization.
        # PIL's resize expects (width, height), while the image is laid out as (C, H, W).
        annotation = np.array(Image.open(annotation).resize((image.shape[2], image.shape[1]), Image.NEAREST))
        color_mapping = self.create_color_mapping(np.unique(annotation))
        # Map each label value to its color; digitize returns 1-based bin indices, hence the -1.
        annotation = color_mapping[np.digitize(annotation, np.unique(annotation)) - 1]
        # Blend the image with the colorized mask (transpose the mask from HWC to CHW).
        masked_image = image * 0.35 + np.transpose(annotation, [2, 0, 1]) * 0.65
        return masked_image

    def preprocess_VOC_mask(self, annotation_path):
        # mask = np.array(Image.open(annotation_path).resize(self.img_size, Image.NEAREST))
        mask = np.array(Image.open(annotation_path))
        # VOC marks object boundaries with the "ignore" label 255; replace those pixels
        # with the most frequent label among their neighbours.
        idxs = np.argwhere(mask == 255)
        for idx in idxs:
            row, col = idx
            # Take the (up to) 3x3 window around the current pixel.
            square = mask[max(0, row - 1):min(row + 2, mask.shape[0]), max(0, col - 1):min(col + 2, mask.shape[1])]
            # Flatten the window and drop its middle element (the pixel itself, unless the window is clipped at the border).
            flattened = square.flatten()
            flattened = np.delete(flattened, flattened.size // 2)
            # Most frequent value among the neighbours; keepdims=True keeps the result indexable on newer SciPy versions.
            most_frequent = mode(flattened, keepdims=True)[0][0]
            # Replace the boundary pixel, falling back to background (0) if the neighbours are also 255.
            if most_frequent != 255:
                mask[row, col] = most_frequent
            else:
                mask[row, col] = 0
        # for i, u in enumerate(np.unique(mask)):
        #     mask[mask == u] = i
        return torch.tensor(mask)
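

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the dataset class).
# The VOC root path and the torchvision transform below are assumptions made
# for this example; adjust them to the actual dataset location.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import transforms  # assumes torchvision is installed

    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    # Hypothetical VOC2012 root containing ImageSets/, JPEGImages/, SegmentationClass/.
    dataset = SegmentationDataset("VOCdevkit/VOC2012", transform=transform)

    # batch_size=1 because the masks are returned at their original per-image
    # resolution and therefore cannot be stacked into a single batch tensor.
    loader = DataLoader(dataset, batch_size=1, shuffle=False)

    images, masks, paths = next(iter(loader))
    print(images.shape, masks.shape, paths[0])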