forked from ipc-lab/NDIC
PairCityscape.py

import os
from random import random

from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torchvision.transforms.functional as TF
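
# Expected directory layout (the standard Cityscapes leftImg8bit / rightImg8bit
# packages); the root `path` passed to the constructor is up to the caller:
#   <path>/leftImg8bit/<set_type>/<city>/<stem>_leftImg8bit.png
#   <path>/rightImg8bit/<set_type>/<city>/<stem>_rightImg8bit.png
# __getitem__ returns (left tensor, right tensor, (left path, right path), index).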


class PairCityscape(Dataset):
    """Paired left/right Cityscapes stereo images."""

    def __init__(self, path, set_type, resize=(128, 256)):
        super().__init__()
        self.resize = resize
        self.dataset = {
            'left': os.path.join(path, 'leftImg8bit', set_type),
            'right': os.path.join(path, 'rightImg8bit', set_type)
        }
        self.cities = [
            item
            for item in os.listdir(self.dataset['left'])
            if os.path.isdir(os.path.join(self.dataset['left'], item))
        ]

        # Collect one (left, right) path pair per frame in every city.
        self.ar = []
        for city in self.cities:
            pair_names = [
                '_'.join(f.split('_')[:-1])
                for f in os.listdir(os.path.join(self.dataset['left'], city))
                if os.path.splitext(f)[-1].lower() == '.png'
            ]
            for pair in pair_names:
                left_img = os.path.join(self.dataset['left'], city, pair + '_leftImg8bit.png')
                right_img = os.path.join(self.dataset['right'], city, pair + '_rightImg8bit.png')
                self.ar.append((left_img, right_img))

        if set_type == 'train':
            self.transform = self.train_deterministic_cropping
        elif set_type in ('test', 'val'):
            self.transform = self.test_val_deterministic_cropping
        else:
            # Fail fast on an unknown split instead of breaking later in __getitem__.
            raise ValueError("set_type must be 'train', 'val' or 'test', got %r" % set_type)

    def train_deterministic_cropping(self, img, side_img):
        # Resize
        img = TF.resize(img, self.resize)
        side_img = TF.resize(side_img, self.resize)

        # Random horizontal flip, applied to both images so the stereo pair stays aligned
        if random() > 0.5:
            img = TF.hflip(img)
            side_img = TF.hflip(side_img)

        # Convert to Tensor
        img = transforms.ToTensor()(img)
        side_img = transforms.ToTensor()(side_img)

        return img, side_img

    def test_val_deterministic_cropping(self, img, side_img):
        # Resize
        img = TF.resize(img, self.resize)
        side_img = TF.resize(side_img, self.resize)

        # Convert to Tensor
        img = transforms.ToTensor()(img)
        side_img = transforms.ToTensor()(side_img)

        return img, side_img

    def __getitem__(self, index):
        left_path, right_path = self.ar[index]
        img = Image.open(left_path)
        side_img = Image.open(right_path)
        image_pair = self.transform(img, side_img)
        return image_pair[0], image_pair[1], (left_path, right_path), index

    def __len__(self):
        return len(self.ar)

    def __str__(self):
        return 'Cityscape'
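

# A minimal sketch (not part of the original file) of how one might wrap
# PairCityscape in train/val DataLoaders for a stereo-pair model. The dataset
# root, batch size, and worker count below are illustrative assumptions.
def build_pair_loaders(path, batch_size=8, num_workers=4):
    """Hypothetical helper: build a shuffled train loader and a deterministic val loader."""
    train_set = PairCityscape(path=path, set_type='train')
    val_set = PairCityscape(path=path, set_type='val')
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers)
    # With the default resize=(128, 256), each batch yields left/right tensors of
    # shape [batch_size, 3, 128, 256], plus the source paths and sample indices.
    return train_loader, val_loader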


if __name__ == '__main__':
    ds = PairCityscape(path='./', set_type='train')
    loader = DataLoader(dataset=ds)
    for data in loader:
        # Unpack in the order __getitem__ returns: left, right, paths, index.
        img, cor_img, paths, idx = data
        print(img.shape, idx)
    print(len(loader))