Showing 7 changed files with 341 additions and 60 deletions.
@@ -0,0 +1,160 @@
import cv2
import numpy as np
import random
from aug_image import noisy_salt_and_pepper_img, noisy_gauss_img, brightness_img
from aug_image import blur_img, saturation_img
from aug_image import flip_horizontol_img, flip_vertical_img, rotate_img
from PIL import Image


def get_boxes(img, annotation_path):
    # Read a YOLO-format annotation file and convert each normalized
    # (class_id, x_center, y_center, width, height) row into a pixel-coordinate
    # (x1, y1, x2, y2) corner box for the given image.
    with open(annotation_path, 'r') as annotation_file:
        lines = annotation_file.readlines()

    boxes = []
    for line in lines:
        data = line.strip().split()
        class_id, x_center, y_center, width, height = map(float, data)
        x_center *= img.shape[1]
        y_center *= img.shape[0]
        width *= img.shape[1]
        height *= img.shape[0]
        x1 = int(x_center - width / 2)
        y1 = int(y_center - height / 2)
        x2 = int(x_center + width / 2)
        y2 = int(y_center + height / 2)
        boxes.append((x1, y1, x2, y2))
    return boxes
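

# For illustration (hypothetical numbers): on a 640x480 image, the YOLO row
# "0 0.5 0.5 0.25 0.2" scales to x_center=320, y_center=240, width=160, height=96,
# so get_boxes returns the pixel box (x1, y1, x2, y2) = (240, 192, 400, 288).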


def flip_horizontol_within_box(image_path, annotation_path):
    # Flip only the annotated regions horizontally, leaving the rest of the image untouched.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        flip_box_h = flip_horizontol_img(box_region)
        flip_box_h = cv2.cvtColor(np.array(flip_box_h), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = flip_box_h
    cv2.imwrite('image.jpg', image)


def flip_vertical_within_box(image_path, annotation_path):
    # Flip only the annotated regions vertically.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        flip_box_w = flip_vertical_img(box_region)
        flip_box_w = cv2.cvtColor(np.array(flip_box_w), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = flip_box_w
    cv2.imwrite('image.jpg', image)


def rotate_within_box(image_path, annotation_path, value):
    # Rotate each annotated region by `value` degrees; rotate_img is called without
    # expand, so the rotated patch keeps the box size and can be pasted back in place.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        rotate_box = rotate_img(box_region, value)
        rotate_box = cv2.cvtColor(np.array(rotate_box), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = rotate_box
    cv2.imwrite('image.jpg', image)


def noisy_salt_and_pepper_within_box(image_path, annotation_path, noise_level):
    # Apply salt-and-pepper noise inside each annotated region only.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        noisy_box = noisy_salt_and_pepper_img(box_region, noise_level)
        image[y1:y2, x1:x2] = noisy_box
    cv2.imwrite('image.jpg', image)


def noisy_gauss_within_box(image_path, annotation_path, std):
    # Apply Gaussian noise with standard deviation `std` inside each annotated region.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        noisy_box = noisy_gauss_img(box_region, std)
        image[y1:y2, x1:x2] = noisy_box
    # cv2.imwrite('image.jpg', image)


def brightness_within_box(image_path, annotation_path, brightness_factor):
    # Adjust brightness inside each annotated region only.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        image_box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        brightness_box = brightness_img(image_box_region, brightness_factor)
        brightness_box = cv2.cvtColor(np.array(brightness_box), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = brightness_box
    # cv2.imwrite('image.jpg', image)


def blur_within_box(image_path, annotation_path, blur_radius):
    # Apply a Gaussian blur with the given radius inside each annotated region.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        image_box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        blurred_box = blur_img(image_box_region, blur_radius)
        blurred_box = cv2.cvtColor(np.array(blurred_box), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = blurred_box
    # cv2.imwrite('image.jpg', image)


def saturation_within_box(image_path, annotation_path, saturation_factor):
    # Adjust color saturation inside each annotated region only.
    image = cv2.imread(image_path)
    boxes = get_boxes(image, annotation_path)
    for box in boxes:
        x1, y1, x2, y2 = box
        box_width = x2 - x1
        box_height = y2 - y1
        box_region = image[y1:y2, x1:x2]
        image_box_region = Image.fromarray(cv2.cvtColor(box_region, cv2.COLOR_BGR2RGB))
        saturation_box = saturation_img(image_box_region, saturation_factor)
        saturation_box = cv2.cvtColor(np.array(saturation_box), cv2.COLOR_RGB2BGR)
        image[y1:y2, x1:x2] = saturation_box
    # cv2.imwrite('image.jpg', image)


# image_path = '/media/space/ssd_1_tb_evo_sumsung/Work/Warp-D/test/images/Monitoring_photo_2_test_25-Mar_11-09-46.jpg'
# annotation_path = '/media/space/ssd_1_tb_evo_sumsung/Work/Warp-D/test/labels/Monitoring_photo_2_test_25-Mar_11-09-46.txt'

# noisy_gauss_within_box(image_path, annotation_path, 17)
# noisy_salt_and_pepper_within_box(image_path, annotation_path, 0.7)
# brightness_within_box(image_path, annotation_path, 0.7)
# blur_within_box(image_path, annotation_path, 1.5)
# saturation_within_box(image_path, annotation_path, 3)

# flip_horizontol_within_box(image_path, annotation_path)
# flip_vertical_within_box(image_path, annotation_path)
# rotate_within_box(image_path, annotation_path, 75)
@@ -0,0 +1,96 @@
from PIL import Image, ImageEnhance, ImageFilter
import numpy as np


def flip_horizontol_img(img):
    img_horizontal_flip = img.transpose(Image.FLIP_LEFT_RIGHT)
    return img_horizontal_flip


def flip_vertical_img(img):
    img_vertical_flip = img.transpose(Image.FLIP_TOP_BOTTOM)
    return img_vertical_flip


def rotate_img(img, value, expand=False):
    # Rotate by `value` degrees; with expand=False the output keeps the input size.
    img_rotated = img.rotate(value, expand=expand)
    return img_rotated


def brightness_img(img, brightness_factor):
    # A factor of 1.0 leaves the image unchanged; <1 darkens, >1 brightens.
    enhancer = ImageEnhance.Brightness(img)
    img_brightened = enhancer.enhance(brightness_factor)
    return img_brightened


def noisy_gauss_img(img, std):
    # Add zero-mean Gaussian noise with standard deviation `std`.
    np_img = np.array(img)
    gauss = np.random.normal(0, std, np_img.shape)
    noisy_gauss = np_img + gauss  # promotes to float64
    noisy_gauss = np.clip(noisy_gauss, 0, 255).astype(np.uint8)
    return Image.fromarray(noisy_gauss)


def noisy_salt_and_pepper_img(img, noise_level):
    # For a 3-channel image, set roughly noise_level * (number of pixels) pixels to
    # white and the same number to black; only the row/column coordinates are used,
    # so whole pixels (all channels) are overwritten.
    np_img = np.array(img)
    amount = int(np.ceil(noise_level * np_img.size / 3))
    coords = [np.random.randint(0, i - 1, amount) for i in np_img.shape]
    np_img[coords[0], coords[1]] = 255
    coords = [np.random.randint(0, i - 1, amount) for i in np_img.shape]
    np_img[coords[0], coords[1]] = 0
    noisy_salt_pepper_img = Image.fromarray(np_img)
    return noisy_salt_pepper_img
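

# Rough sense of scale (hypothetical numbers): for a 100x100 RGB image,
# np_img.size is 100 * 100 * 3 = 30000, so noise_level=0.05 gives
# amount = ceil(0.05 * 30000 / 3) = 500 salt pixels and 500 pepper pixels.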


def blur_img(img, blur_radius):
    return img.filter(ImageFilter.GaussianBlur(radius=blur_radius))


def saturation_img(img, saturation_factor):
    # A factor of 1.0 keeps the original saturation; 0 gives a grayscale image.
    enhancer = ImageEnhance.Color(img)
    image_saturation = enhancer.enhance(saturation_factor)
    return image_saturation


# Example input for the snippets below; uncomment and point at a local image to try them.
# img = Image.open('/media/space/ssd_1_tb_evo_sumsung/exp_pad/dataset/train_dataset/moja/train/images/16-Aug_12-29-54.png')


# Adjust the saturation level (1.0 is the original saturation)
# saturation_factor = 10  # change this value to adjust saturation (e.g., 0.5 for less saturation)
# image_saturation = saturation_img(img, saturation_factor)
# image_saturation.save('modified_image.jpg')

# blur_radius = 1.5  # change this value to adjust the blur level
# blurred_image = blur_img(img, blur_radius)
# blurred_image.save('blurred_image.png')

# noisy_salt_pepper_img = noisy_salt_and_pepper_img(img, 0.2)
# noisy_salt_pepper_img.save('noisy_salt_pepper_img.png')

# img_noisy_gauss = noisy_gauss_img(img, 40)
# img_noisy_gauss.save('noisy_gauss.png')

# img_brightness = brightness_img(img, 1.5)
# img_brightness.save('brightness.png')

# img_horizontal_flip = flip_horizontol_img(img)
# img_horizontal_flip.save('horizontal_flip.png')

# img_vertical_flip = flip_vertical_img(img)
# img_vertical_flip.save('img_vertical_flip.png')

# img_diagonal_flip_main = img.transpose(Image.Transpose.ROTATE_180)  # rotate by 180 degrees
# img_diagonal_flip_main.save('diagonal_flip_main.png')

# img_rotated = rotate_img(img, 75, expand=False)
# img_rotated.save('rotated_image.png')
@@ -0,0 +1,60 @@
import os
from tqdm import tqdm
from pathlib import Path
from PIL import Image


def resize_images_and_annotations(data_path, img_size):
    # Resize every image under <data_path>/<split>/images to img_size and rewrite the
    # matching YOLO label files under <data_path>/<split>/labels.
    if isinstance(img_size, int):
        width = height = img_size
    elif isinstance(img_size, tuple) and len(img_size) == 2:
        width, height = img_size
    else:
        raise ValueError("Invalid img_size format. Please provide either an integer or a tuple of two integers.")

    path = Path(data_path)
    folder_names = [folder.name for folder in path.iterdir() if folder.is_dir()]

    for name in folder_names:
        folder_path = path / name
        images_path = os.path.join(folder_path, 'images')
        labels_path = os.path.join(folder_path, 'labels')

        for image_name in tqdm(os.listdir(images_path), desc=f'Resize {name} images'):
            image_path = os.path.join(images_path, image_name)
            # Label files are assumed to share the image name, with a .txt extension.
            label_path = os.path.join(labels_path, image_name.replace('.jpg', '.txt'))

            with Image.open(image_path) as img:
                original_width, original_height = img.size

                if original_width != width or original_height != height:
                    img = img.resize((width, height))

                    if os.path.exists(label_path):
                        with open(label_path, 'r') as file:
                            lines = file.readlines()

                        with open(label_path, 'w') as file:
                            for line in lines:
                                parts = line.split()
                                if len(parts) == 5:
                                    # Convert the normalized YOLO values to pixels ...
                                    x_center = float(parts[1]) * original_width
                                    y_center = float(parts[2]) * original_height
                                    box_width = float(parts[3]) * original_width
                                    box_height = float(parts[4]) * original_height

                                    # ... rescale them to the new image size ...
                                    x_center *= width / original_width
                                    y_center *= height / original_height
                                    box_width *= width / original_width
                                    box_height *= height / original_height

                                    # ... and normalize again before writing the line back.
                                    x_center /= width
                                    y_center /= height
                                    box_width /= width
                                    box_height /= height

                                    file.write(f"{parts[0]} {x_center} {y_center} {box_width} {box_height}\n")

                    img.save(image_path)


# resize_images_and_annotations('/media/space/ssd_1_tb_evo_sumsung/Work/Warp-D', (640, 480))
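
A quick sanity check of the label arithmetic with hypothetical numbers: converting to pixels, rescaling, and re-normalizing cancel out, so the written values equal the originals, which is expected because YOLO coordinates are relative to image size.

x_norm = 0.25                          # normalized x_center from a label file
original_width, width = 1280, 640      # hypothetical resize from 1280 to 640 wide
x_center = x_norm * original_width     # 320.0 px in the original image
x_center *= width / original_width     # 160.0 px in the resized image
x_center /= width                      # 0.25 -- same normalized value as before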