-
Notifications
You must be signed in to change notification settings - Fork 0
/
split_train_val_test.py
92 lines (72 loc) · 2.79 KB
/
split_train_val_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
"""
Split the dataset into three parts:
- a test set with 10% of the data
- a validation set with 10% of the data
- a training set with 80% of the data

Authors: Camille COCHENER, 2021
"""
import os
import argparse
import json
import numpy as np
import requests
from PIL import Image
from io import BytesIO
def parse_arguments():
    """Read the command-line arguments.

    Returns:
        argparse.Namespace with a single ``dataset_folder`` attribute:
        the path to the raw dataset folder.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('dataset_folder', help='Path to the raw dataset folder')
    return arg_parser.parse_args()
def split_dataset(args):
    """Split the annotated dataset into train/val/test parts (80/10/10).

    Reads ``annotations.json`` from ``args.dataset_folder``, partitions the
    records by position (first 80% train, next 10% val, last 10% test),
    creates a ``dataset/{train,val,test}`` folder layout in the current
    working directory and writes one ``annotations.json`` per split.

    Args:
        args: parsed command-line arguments; only ``args.dataset_folder``
            (path to the raw dataset folder) is used.

    Returns:
        Tuple ``(train, validate, test)`` of lists of annotation records.
    """
    annotations_path = os.path.join(args.dataset_folder, 'annotations.json')
    with open(annotations_path, 'r') as f:
        data = json.load(f)

    # Positional 80/10/10 split. Plain slicing replaces the original
    # np.split round-trip, which boxed the records into an object array
    # only to convert them straight back to lists.
    n = len(data)
    train = data[:int(n * 0.8)]
    validate = data[int(n * 0.8):int(n * 0.9)]
    test = data[int(n * 0.9):]

    # Create the output folder layout and dump each split's annotations.
    os.makedirs("dataset", exist_ok=True)
    for split_name, subset in (("train", train), ("val", validate), ("test", test)):
        os.makedirs(os.path.join("dataset", split_name), exist_ok=True)
        annot_path = os.path.join("dataset", split_name, 'annotations.json')
        # Keep an existing annotations file — same behaviour as before:
        # a previously generated split is never overwritten.
        if not os.path.isfile(annot_path):
            with open(annot_path, 'w+') as f:
                f.write(json.dumps(subset))
    return train, validate, test
def get_images(train, validate, test):
    """Download the images of each split into its ``dataset/<split>`` folder.

    Args:
        train: annotation records of the training split.
        validate: annotation records of the validation split.
        test: annotation records of the test split.

    Each record must carry an "External ID" (the file name) and a
    "Labeled Data" (the image URL). Images already present on disk are
    skipped, so an interrupted download can be resumed.
    """
    for folder, subset in (("train", train), ("val", validate), ("test", test)):
        for record in subset:
            image_name = record["External ID"]
            image_url = record["Labeled Data"]
            file_path = os.path.join("dataset", folder, image_name)
            if os.path.isfile(file_path):
                continue  # already downloaded on a previous run
            # Bounded timeout so one dead URL cannot hang the whole job
            # (requests has no default timeout), and fail loudly on HTTP
            # errors instead of feeding an error page to PIL.
            response = requests.get(image_url, timeout=60)
            response.raise_for_status()
            img = Image.open(BytesIO(response.content))
            img.save(file_path)
def main():
    """Script entry point: split the annotations, then fetch the images."""
    cli_args = parse_arguments()
    train_set, val_set, test_set = split_dataset(cli_args)
    get_images(train_set, val_set, test_set)


if __name__ == '__main__':
    main()