# colorfunctions.py
import cv2
from PIL import Image
from scipy import signal,ndimage
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import math
import openpyxl
import pickle
import pandas as pd
import os
import tifffile as tif  # the PyPI package is 'tifffile'; 'tiffile' is a misspelled legacy alias
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
import numpy.matlib as matlib
def output_for_labeling(crop_params, crop_img_pil, imgn_erode, sample_ID, water,crop_image_erode,
PIL_crop_image_erode, img_name, Notes, PTL):
    # Build a Sample object from the segmentation results.
    # First, collect the droplet IDs (excluding the background label 0).
num_drops = np.unique(imgn_erode)
if np.size(np.where(num_drops==0)) != 0:
num_drops = np.extract(num_drops!=0, num_drops)
drop_IDs = num_drops
Number_of_drops = np.size(drop_IDs)
# Create the 3D array drops where each layer is zero everywhere except the location of a single droplet
drops_rows = np.size(imgn_erode, 0)
drops_col = np.size(imgn_erode, 1)
drops = np.zeros((Number_of_drops,drops_rows,drops_col))
for i in range(Number_of_drops):
n = drop_IDs[i]
drop_loc = np.copy(imgn_erode)
drop_loc[drop_loc!=n] = 0
drops[i,:,:] = drop_loc
sample = Sample(crop_params, crop_img_pil,sample_ID, drop_IDs, drops, Number_of_drops, water, imgn_erode,
crop_image_erode, PIL_crop_image_erode, img_name, Notes, PTL)
# Save all information in a pickle format
sample.save()
return sample
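# A minimal sketch of how the per-droplet mask stack built above can be consumed,
# assuming 'sample' came from output_for_labeling (the droplet index and the reading
# of PTL as a length-per-pixel factor are illustrative assumptions):
#     mask = sample.drops[0]                  # 2D layer, nonzero only at one droplet
#     pixel_area = np.count_nonzero(mask)     # droplet area in pixels
#     area_len2 = pixel_area * sample.PTL**2  # area in physical units, if PTL is length/pixel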
def time_names_0(time_step,cut,start_image):
    # Reads every TIFF stack in ./Images_Tiff, extracts the frames, and labels them
    # according to their time stamp. cut is the frame-skip interval (e.g., cut = 30
    # keeps every 30th frame).
i = 0
    # Read each TIFF image into img_raw and count the number of pictures in i
for name in os.listdir('./Images_Tiff'):
if name.endswith('.tif'):
#print(name)
i+=1
#print(f'./Images_Tiff/{name}')
#print(os.path.isfile(f'./Images_Tiff/{name}'))
if os.path.isfile(f'./Images_Tiff/{name}'):
if i == 1:
#print(i)
img_raw = tif.imread(f'./Images_Tiff/{name}')
else :
# print(i)
img_raw = np.concatenate((img_raw,tif.imread(f'./Images_Tiff/{name}')))
# img_raw_start = img_raw[start_image-1, :,:]
#print(img_raw.shape)
img_raw_start = np.copy(img_raw[start_image:,:,:,:])
num_pics = np.size(img_raw_start,0)
num = math.floor(num_pics/cut)
time = np.arange(1,(num)+1,1)*(time_step*cut)
strings = [str(x) for x in time]
name0 = ['Img_']*num
name1 = [''.join(element) for element in zip(name0 ,strings)]
jpg = ['.jpg'] * num
name = [''.join(element) for element in zip(name1 ,jpg)]
# Save the tiff images as jpg with the given names
for i in range(num):
j = cut*i
if (j>num_pics-1):
break
im = Image.fromarray(img_raw_start[j,:,:])
im.save(f'./Images_simp/{name[i]}')
#print(img_raw_start.shape)
return num, time , name
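# Hedged usage sketch for time_names_0, assuming ./Images_Tiff and ./Images_simp exist
# and time_step is in seconds (all values illustrative):
#     num, time, name = time_names_0(time_step=10, cut=30, start_image=5)
#     # keeps every 30th frame and saves ./Images_simp/Img_300.jpg, Img_600.jpg, ...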
def crop_box(im,xvals,yvals,verbose):
    # Redraw the box so the user can confirm it is what they want
# Determine the Rectangle lower left point
ll_y = math.ceil(min(yvals))
ll_x = math.ceil(min(xvals))
# Determine rectangle width
width = math.ceil(max(xvals)) - ll_x
# Determine rectangle height
height = math.ceil(max(yvals)) - ll_y
fig, ax = plt.subplots(1, figsize=(1248 / 200, 1024 / 300))
ax.imshow(im)
plt.tick_params(left = False, right = False , labelleft = False ,
labelbottom = False, bottom = False)
# Add rectangle
ax.add_patch(Rectangle((ll_x, ll_y), width, height, edgecolor='red', fill=False))
    plt.title("Cropped Area Sample")
if verbose:
plt.show()
plt.close()
return ll_x, ll_y, width, height
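# Hedged example: visually confirm a crop box built from two opposite corner points
# (coordinates are illustrative):
#     ll_x, ll_y, width, height = crop_box(im, xvals=[120, 980], yvals=[80, 640], verbose=True)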
def RRC(img_path, rotate_crop_params,package):
    '''
    Rotates and crops the given image.
    Inputs:
        img_path := image path
        rotate_crop_params := dictionary of values: {theta, x1, x2, y1, y2}, where
            theta := angle of counter-clockwise rotation
            x1 := start pixel of x-axis crop
            x2 := end pixel of x-axis crop
            y1 := start pixel of y-axis crop
            y2 := end pixel of y-axis crop
        package := 'cv2' (returns BGR channel order) or 'pil' (returns RGB channel order)
    Outputs:
        img := rotated and cropped image
    '''
if package == 'cv2':
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) # read images
elif package == 'pil':
img = Image.open(img_path, 'r')
rotated = ndimage.rotate(img, rotate_crop_params['theta']) # reads image and rotates
img = rotated[rotate_crop_params['y1']:rotate_crop_params['y2'],
rotate_crop_params['x1']:rotate_crop_params['x2']] # crops image
return img
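# Hedged example call (path and crop values are illustrative); note that with
# package='cv2' the returned array is in BGR channel order:
#     params = {'theta': 1.5, 'x1': 100, 'x2': 900, 'y1': 50, 'y2': 600}
#     cropped = RRC('./Images_simp/Img_300.jpg', params, 'cv2')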
def segment_on_dt(a, img, threshold):
'''
Implements watershed segmentation.
Inputs:
a := the raw image input
img := threshold binned image
threshold := RGB threshold value
Outputs:
lbl := Borders of segmented droplets
wat := Segmented droplets via watershed
lab := Indexes of each segmented droplet
'''
    # estimate the borders of droplets from known and unknown background + foreground (computed using dilate and erode)
border = cv2.dilate(img, None, iterations=1)
border = border - cv2.erode(border, None)
# segment droplets via distance mapping and thresholding
dt = cv2.distanceTransform(img, 2, 3)
dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
_, dt = cv2.threshold(dt, threshold, 255, cv2.THRESH_BINARY)
# obtain the map of segmented droplets with corresponding indices
lbl, ncc = ndimage.label(dt)
lbl = lbl * (255 / (ncc + 1))
lab = lbl
# Completing the markers now.
lbl[border == 255] = 255
lbl = lbl.astype(np.int32)
a = cv2.cvtColor(a,
cv2.COLOR_GRAY2BGR) # we must convert grayscale to BGR because watershed only accepts 3-channel inputs
wat = cv2.watershed(a, lbl)
lbl[lbl == -1] = 0
lbl = lbl.astype(np.uint8)
return 255 - lbl, wat, lab # return lab, the segmented and indexed droplets
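# Hedged sketch: segment_on_dt is normally driven by water() below, but it can be
# called directly on a grayscale image and its Otsu-binarized mask (the threshold
# value here is illustrative):
#     lbl, wat, lab = segment_on_dt(a=img_gray, img=img_bin, threshold=0)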
def water(image, small_elements_pixels, large_elements_pixels,k):
    '''
    Applies watershed image segmentation to separate droplet pixels from background pixels.
    Inputs:
        image := input droplet image to segment
        small_elements_pixels := removes segments containing fewer than this many pixels
        large_elements_pixels := removes segments containing more than this many pixels
        k := kernel size for the initial dilation
    Outputs:
        water := watershed result with droplet interiors indexed by droplet number
    '''
RGB_threshold = 0
pixel_threshold = 0
# Added these Lines
kernel = np.ones((k,k), np.uint8)
image= cv2.dilate(image, kernel)
# added these lines
img = image.copy()
img = 255 - img
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = cv2.medianBlur(img, 3)
_, img_bin = cv2.threshold(img, 0, 255,
# threshold image using Otsu's binarization
# https://docs.opencv.org/4.x/d7/d4d/tutorial_py_thresholding.html
cv2.THRESH_OTSU)
img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,
np.ones((4, 4), dtype=int))
# first fold of watershed to remove white centers
result, water, labs = segment_on_dt(a=img, img=img_bin,
threshold=RGB_threshold)
# segment droplets from background and return indexed droplets
# remove small/large elements
uniq_full, uniq_counts = np.unique(water,
return_counts=True) # get all unique watershed indices with pixel counts
large_elements = uniq_full[uniq_counts > large_elements_pixels] # mask large elements based on number of pixels
small_elements = uniq_full[uniq_counts < small_elements_pixels] # mask small elements based on number of pixels
for n in range(len(large_elements)):
water[water == large_elements[n]] = 0 # remove all large elements
for n in range(len(small_elements)):
water[water == small_elements[n]] = 0 # remove all small elements
result[result == 255] = 0
droplet_count = result.copy()
return water
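# Hedged example (the size bounds and kernel size are illustrative; tune them to the
# droplet scale of your images):
#     labels = water(image, small_elements_pixels=50, large_elements_pixels=5000, k=3)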
def cc_crop(x_vals,y_vals, x_space,y_space,img,spacex,spacey,offsetx,offsety,verbose):
    # x_vals is a 1x2 array with the min and max x values of the whole color card crop box
    # y_vals is the same for y
    # x_space is the spacing in the x-direction between boxes
    # y_space is the spacing in the y-direction between boxes
r = 4
c = 6
    xvals = np.zeros((r,c))
    yvals = np.zeros((r,c))  # separate array ('yvals = xvals' would alias xvals)
fig, ax = plt.subplots(1,figsize=(1248 / 200, 1024 / 300))
ax.imshow(img)
plt.tick_params(left = False, right = False , labelleft = False ,
labelbottom = False, bottom = False)
width = x_space
height = y_space
ll_x_a = []
ll_y_a = []
for i in range(c):
for j in range(r):
ll_y = math.ceil(min(y_vals)+ (j)*(y_space + spacey)+ offsety*i)
ll_x = math.ceil(min(x_vals)+(i)*(x_space + spacex)+offsetx*j)
ax.add_patch(Rectangle((ll_x, ll_y), width, height, edgecolor='red', fill=False))
ll_x_a.append(ll_x)
ll_y_a.append(ll_y)
    plt.title('Cropped Colors of Xrite Color Card')
if verbose:
plt.show()
plt.close()
return ll_x_a, ll_y_a
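# Hedged example for the fixed 4x6 X-rite card layout (all geometry values are
# illustrative; the offsets compensate for a slightly tilted card):
#     ll_x_a, ll_y_a = cc_crop([100, 400], [50, 250], 40, 40, img, 10, 10, 0, 0, verbose=False)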
# Function to extract Color Card RGB data from 1 image
def CC_RGB(cc_ll_x, cc_ll_y, width,height,img_path):
# set Crop indices
R_drop_cc = []
G_drop_cc=[]
B_drop_cc = []
R_hi_drop_cc = []
G_hi_drop_cc = []
B_hi_drop_cc = []
R_lo_drop_cc = []
G_lo_drop_cc = []
B_lo_drop_cc = []
for j in range(24):
crop_params = {
'theta': 0,
'x1': cc_ll_x[j],
'x2': cc_ll_x[j]+width,
'y1': cc_ll_y[j],
'y2': cc_ll_y[j]+height
}
crop_image = RRC(img_path, crop_params,'pil')
R_channel = np.array(crop_image, dtype=np.uint8)[:,:,0]
G_channel = np.array(crop_image, dtype=np.uint8)[:,:,1]
B_channel = np.array(crop_image, dtype=np.uint8)[:,:,2]
R_drop_cc.append([round(np.mean(R_channel))])
G_drop_cc.append([round(np.mean(G_channel))])
B_drop_cc.append([round(np.mean(B_channel))])
R_hi_drop_cc.append([np.percentile(np.sort(R_channel, axis = None), 95)])
G_hi_drop_cc.append([np.percentile(np.sort(G_channel, axis = None), 95)])
B_hi_drop_cc.append([np.percentile(np.sort(B_channel, axis = None), 95)])
R_lo_drop_cc.append([np.percentile(np.sort(R_channel, axis = None), 5)])
G_lo_drop_cc.append([np.percentile(np.sort(G_channel, axis = None), 5)])
B_lo_drop_cc.append([np.percentile(np.sort(B_channel, axis = None), 5)])
    # The returned lists hold one nested entry per color-card square; e.g., R_drop_cc[0]
    # contains the mean red-channel value of the first color card patch.
return R_drop_cc,G_drop_cc,B_drop_cc, R_hi_drop_cc,G_hi_drop_cc,B_hi_drop_cc, R_lo_drop_cc,G_lo_drop_cc,B_lo_drop_cc
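# Hedged example: extract the 24 patch statistics from one card image, using the boxes
# produced by cc_crop above (the box size is illustrative):
#     XR, XG, XB, XR_hi, XG_hi, XB_hi, XR_lo, XG_lo, XB_lo = CC_RGB(
#         ll_x_a, ll_y_a, 40, 40, './Images_simp/xrite.jpg')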
def convert_LAB_RGB(data, to_space, from_space):
    # Input:
    # - data: a np array with dimensions (n_samples, {optional
    #   dimension: n_times}, n_color_coordinates=3) (e.g., a direct output of
    #   'rgb_extractor()' or 'rgb_extractor_Xrite_CC()')
    # - from_space: choose either 'RGB' or 'LAB'
    # - to_space: choose either 'RGB' or 'LAB'
    # Output:
    # - converted: a np array with the same dimensions as the input
n_d = data.ndim
if n_d == 2:
data = np.expand_dims(data, 1)
elif n_d != 3:
raise Exception('Faulty number of dimensions in the input!')
if (from_space == 'RGB') and (to_space == 'LAB'):
# Values from rgb_extractor() are [0,255] so let's normalize.
data = data/255
# Transform to color objects (either sRGBColor or LabColor).
data_objects = np.vectorize(lambda x,y,z: sRGBColor(x,y,z))(
data[:,:,0], data[:,:,1], data[:,:,2])
# Target color space
color_space = matlib.repmat(LabColor, *data_objects.shape)
# Transform from original space to new space
converted_objects = np.vectorize(lambda x,y: convert_color(x,y))(
data_objects, color_space)
# We got a matrix of color objects. Let's transform to a 3D matrix of floats.
converted = np.transpose(np.vectorize(lambda x: (x.lab_l, x.lab_a, x.lab_b))(
converted_objects), (1,2,0))
elif (from_space == 'LAB') and (to_space == 'RGB'):
data_objects = np.vectorize(lambda x,y,z: LabColor(x,y,z))(
data[:,:,0], data[:,:,1], data[:,:,2])
color_space = matlib.repmat(sRGBColor, *data_objects.shape)
converted_objects = np.vectorize(lambda x,y: convert_color(x,y))(
data_objects, color_space)
converted = np.transpose(np.vectorize(lambda x: (x.rgb_r, x.rgb_g, x.rgb_b))(
converted_objects), (1,2,0))
# Colormath library interprets rgb in [0,1] and we want [0,255] so let's
# normalize to [0,255].
converted = converted*255
else:
raise Exception('The given input space conversions have not been implemented.')
if n_d == 2:
converted = np.squeeze(converted)
return converted
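# Hedged round-trip example (values illustrative; RGB is interpreted on [0, 255]):
#     rgb = np.array([[128, 64, 32], [200, 180, 160]], dtype=float)
#     lab = convert_LAB_RGB(rgb, to_space='LAB', from_space='RGB')
#     back = convert_LAB_RGB(lab, to_space='RGB', from_space='LAB')  # ~= rgb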
# New function for faster color calibration
# Taken from previous color calibration code in 2023
def color_calibration(drop, ll_x, ll_y, width, height, file, XR, XG, XB, XR_hi, XG_hi, XB_hi,
                      XR_lo, XG_lo, XB_lo, reference_CC_lab):
# Let's extract the rgb colors from our color passport picture.
# Convert the color card into LAB values
img_array = np.array(np.stack((XR,XG,XB), axis= 2),dtype = np.uint8)
#RGB_reshaped = img_array.reshape(24,3)
#CC_lab = convert_LAB_RGB(RGB_reshaped, 'LAB', 'RGB')
RGB_reshaped = img_array.reshape(24,1,3)
CC_lab = cv2.cvtColor((RGB_reshaped/255).astype('float32'), cv2.COLOR_RGB2LAB)
CC_lab = CC_lab.reshape(24,3)
# print('This is X_lab')
# print(CC_lab)
# Convert droplet RGBs into LAB values
#print("Lab")
#sample_lab = convert_LAB_RGB(drop, 'LAB', 'RGB')
#sample_lab = cv2.cvtColor(drop, cv2.COLOR_RGB2LAB)
#drop = cv2.cvtColor(drop, cv2.COLOR_BGR2RGB)
# drop is BGR since RRC used 'cv2'
sample_lab = cv2.cvtColor((drop/255).astype('float32'), cv2.COLOR_BGR2LAB)
# Reorganize the reference to match the colors orders in the color card
# X_hi_lab = X_hi_lab[order]
# X_lo_lab = X_lo_lab[order]
# sample_lab = sample_lab[order]
###########################
# Color calibration starts.
#print("Section 1")
# Number of color patches in the color chart.
N_patches = 24
# Let's create the weight matrix for color calibration using 3D thin plate
# spline.
# Data points of our color chart in the original space.
# Pi = [1 l_color1 a_color1 b_color_1]
# 24 x 4
P = np.concatenate((np.ones((N_patches,1)), CC_lab), axis=1)
# Data points of our color chart in the transformed space.
# 24x3
V = reference_CC_lab
# Shape distortion matrix, K
# 24x24
K = np.zeros((N_patches,N_patches))
R = np.zeros((N_patches,N_patches))
# %%timeit -n 1000
# How to calculate K_IJ w/o a for loop
X = P[:,1].reshape((1,N_patches))
Y = P[:,2].reshape((1,N_patches))
Z = P[:,3].reshape((1,N_patches))
X_mat = np.zeros((N_patches,N_patches))
X_mat[:,:] = np.transpose(X)
Y_mat = np.zeros((N_patches,N_patches))
Y_mat[:,:] = np.transpose(Y)
Z_mat = np.zeros((N_patches,N_patches))
Z_mat[:,:] = np.transpose(Z)
rx = np.square(X_mat - X)
ry = np.square(Y_mat - Y)
rz = np.square(Z_mat - Z)
r= np.sqrt(rx+ry+rz)
K= 2* np.multiply(np.square(r),np.log(r+10**(-20)))
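    # Note: 2 * r^2 * log(r) equals the classic thin-plate-spline kernel
    # U(r) = r^2 * log(r^2); the 1e-20 offset avoids log(0) on the diagonal.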
# Linear and non-linear weights WA:
numerator = np.concatenate((V, np.zeros((4,3))), axis=0)
denominator = np.concatenate((K,P), axis=1)
denominator = np.concatenate((denominator,
np.concatenate((np.transpose(P),
np.zeros((4,4))),axis=1)), axis=0)
WA = np.matmul(np.linalg.pinv(denominator), numerator)
#print("check")
    # Checking that the fit went OK. We should get the same result as in V (except for
    # the 4 bottom rows)
CC_lab_double_transformation = np.matmul(denominator,WA)
#print('Color chart patches in reference Lab:', reference_CC_lab,
# 'Color chart patches transformed to color calibrated space and back - this should be the same than above apart
# from the last 4 rows',
# CC_lab_double_transformation, 'subtracted: ', reference_CC_lab-CC_lab_double_transformation[0:-4,:])
# print('Checking if color transformation is successful - all values here should be near zero:/n', reference_CC_lab
# CC_lab_double_transformation[0:-4,:])
#print("Section 2")
# Let's perform color calibration for the sample points!
N_samples = sample_lab.shape[0]
N_times = sample_lab.shape[1]
# print(K_test)
P_new = np.ones((N_samples, N_times,4))
P_new[:,:,1:4] = np.copy(sample_lab)
# 190x560x4
K_new = np.zeros((N_samples, N_times,N_patches))
# 190x560x24
X_n = P_new[:,:,1].reshape((N_samples,1,N_times))
Y_n = P_new[:,:,2].reshape((N_samples,1,N_times))
Z_n = P_new[:,:,3].reshape((N_samples,1,N_times))
#190x1x560
X_mat = np.zeros((N_samples,N_times,N_patches))
X_mat[:,:] = X_n.reshape((N_samples,N_times,1))
Y_mat = np.zeros((N_samples,N_times,N_patches))
Y_mat[:,:] = Y_n.reshape((N_samples,N_times,1))
Z_mat = np.zeros((N_samples,N_times,N_patches))
Z_mat[:,:] = Z_n.reshape((N_samples,N_times,1))
# (190, 560, 24)
X_sub = np.zeros((N_samples,1,N_patches))
X_sub[:,:] = X
Y_sub = np.zeros((N_samples,1,N_patches))
Y_sub[:,:] = Y
Z_sub = np.zeros((N_samples,1,N_patches))
Z_sub[:,:] = Z
rx = np.square(X_mat - X_sub)
ry = np.square(Y_mat - Y_sub)
rz = np.square(Z_mat - Z_sub)
# 190,560,24
r= np.sqrt(rx+ry+rz)
K_new =2* np.multiply(np.square(r),np.log(r+10**(-20)))
#190,560,24
K_tt = np.copy(K_new)
P_sub = np.zeros((N_samples,N_patches,4))
# 190x24x4
P_sub[:,:] = P
dennom = np.concatenate((K_new,P_new),axis=2)
denden = np.concatenate((P_sub.reshape(N_samples,4,N_patches), np.zeros((N_samples,4,4))), axis=2)
# Try changing P to P_new to see if this fixes the color calibration errors
sample_lab_cal = np.matmul(np.concatenate((dennom, denden), axis=1), WA)
# Remove zeros, i.e., the last four rows from the third dimension.
sample_lab_cal = sample_lab_cal[:,0:-4,:]
#print("sample_lab_cal dtype")
#print(sample_lab_cal.dtype)
# 190x560x3
################################
# Color calibration is done now.
# Let's transform back to rgb.
# print("Transform")
##sample_rgb_cal = convert_LAB_RGB(sample_lab_cal, 'RGB', 'LAB')
#print("float 64")
#print(sample_lab_cal)
#print(np.rint(sample_lab_cal).astype(np.int8))
#print("float32")
#print(sample_lab_cal.astype(np.float32))
sample_rgb_cal = cv2.cvtColor(sample_lab_cal.astype('float32'), cv2.COLOR_LAB2RGB)
sample_rgb_cal = sample_rgb_cal*255
#sample_rgb_cal = convert_LAB_RGB(sample_lab_cal, 'RGB', 'N')
# X_hi_lab = convert_LAB_RGB([XR_hi,XG_hi,XB_hi], 'LAB')
# X_lo_lab = convert_LAB_RGB([XR_lo, XG_lo, XB_lo],'LAB')
# Let's return both lab and rgb calibrated values.
return sample_rgb_cal, sample_lab_cal, CC_lab, sample_lab, WA
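# Hedged sketch of a single-image calibration call; every input here is produced by the
# helpers above (cc_crop, CC_RGB, RRC), and the box size is illustrative:
#     rgb_cal, lab_cal, CC_lab_out, sample_lab_out, WA = color_calibration(
#         drop, ll_x_a, ll_y_a, 40, 40, file, XR, XG, XB,
#         XR_hi, XG_hi, XB_hi, XR_lo, XG_lo, XB_lo, reference_CC_lab)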
def Results(xc_ll_x, xc_ll_y, wid_x, hei_x,crop_params,num_pics,time,names,start_img,verbose=False):
    # This function runs color calibration on the droplets over all files.
    # Code structure: for each image, extract all calibrated droplet colors.
    # Inputs:
    #     xc_ll_x, xc_ll_y = lower-left x and y indices of the crop boxes for the Xrite Colour Checker
    #     wid_x, hei_x = width and height of the crop boxes for the Xrite Colour Checker
    #     crop_params = crop parameters of the reference image
    #     num_pics, time, names = image count, time stamps, and file names (from time_names_0);
    #         the time step between images is defined in the sample_names spreadsheet
    #     start_img = index of the starting image in the image time series
    #         (removes the first couple of frames of the time-series data)
    # Outputs:
    #     drops_rgb_cal_x, drops_lab_cal_x = RGB and LAB colour data of the color-calibrated images,
    #         where drops_rgb_cal_x[x][0] is the color-calibrated image at time step x from the start image
    # Note: 'cut' (the number of images skipped in the data set to reduce processing time;
    #     cut = 1 keeps every image) is applied upstream in time_names_0.
drops_rgb_cal_x = []
drops_lab_cal_x = []
CC_out= []
sample_out= []
WA_out= []
reference_CC_lab = np.array([[37.54,14.37,14.92],[62.73,35.83,56.5],[28.37,15.42,-49.8],
[95.19,-1.03,2.93],[64.66,19.27,17.5],[39.43,10.75,-45.17],
[54.38,-39.72,32.27],[81.29,-0.57,0.44],[49.32,-3.82,-22.54],
[50.57,48.64,16.67],[42.43,51.05,28.62],[66.89,-0.75,-0.06],
[43.46,-12.74,22.72],[30.1,22.54,-20.87],[81.8,2.67,80.41],
[50.76,-0.13,0.14],[54.94,9.61,-24.79],[71.77,-24.13,58.19],
[50.63,51.28,-14.12],[35.63,-0.46,-0.48],[70.48,-32.26,-0.37],
[71.51,18.24,67.37],[49.57,-29.71,-28.32],[20.64,0.07,-0.46]])
file = './Images_simp/xrite.jpg'
XR,XG,XB, XR_hi,XG_hi,XB_hi, XR_lo,XG_lo,XB_lo = CC_RGB(xc_ll_x, xc_ll_y, wid_x,hei_x,file)
# Reference data is in different order (from upper left to lower left, upper
# 2nd left to lower 2nd left...).
for k in range(num_pics):
# Crop the image
file_k = f"./Images_simp/{names[k]}"
drop = RRC(file_k, crop_params, 'cv2')
# Calibrate the image
drop_rgb_cal, drop_lab_cal,CC,sample,wa = color_calibration(drop, xc_ll_x, xc_ll_y, wid_x,hei_x, file,XR,XG,XB, XR_hi,XG_hi,XB_hi,XR_lo,XG_lo,XB_lo, reference_CC_lab)
# Store Calibrated Images
drops_rgb_cal_x.append([drop_rgb_cal])
drops_lab_cal_x.append([drop_lab_cal])
CC_out.append([CC])
sample_out.append([sample])
WA_out.append([wa])
if verbose:
print(f'DONE {k+1}/{num_pics}')
print('DONE Results')
return drops_rgb_cal_x, drops_lab_cal_x, CC_out, sample_out, WA_out
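# Hedged end-to-end sketch, assuming the card boxes were located with cc_crop and the
# frames were exported with time_names_0 (all numeric values illustrative):
#     num, time, names = time_names_0(10, 30, 5)
#     rgb_cal, lab_cal, CC_out, sample_out, WA_out = Results(
#         ll_x_a, ll_y_a, 40, 40, crop_params, num, time, names, 5, verbose=True)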
# Define sample Class
class Sample:
def __init__(self, crop_params, crop_img_pil, sample_ID,drop_IDs,drops, Number_of_drops, water, img_erode,
crop_image_erode, PIL_crop_image_erode, img_name, Notes, PTL):
        # crop_params are the crop parameters of the reference image
        # sample_ID is a string with the sample ID/name assigned in the spreadsheet
        # drop_IDs is an array with each of the droplet numbers in it
        # drops is a 3D array where each layer is zero everywhere except at the location of a single droplet
        #     (comes from cv2)
        # water is the watershed result
        # img_erode is a 2D array with the eroded watershed result
        # crop_image_erode and PIL_crop_image_erode are images with the watershed droplets superimposed on
        #     the cropped image
        # crop_image_erode is extracted in cv2 format and PIL_crop_image_erode is extracted by PIL
        # img_name is the name of the image used for watershed
        # Notes are any experimental notes
        # PTL is the pixel-to-length conversion factor
self.ID = sample_ID
self.drops = drops
self.drop_IDs = drop_IDs
self.Number_of_drops = Number_of_drops
self.img_erode = img_erode
self.cv2_eroded = crop_image_erode
self.PIL_eroded = PIL_crop_image_erode
self.water = water
self.img_name = img_name
self.Notes = Notes
self.PTL = PTL
self.crop_img_pil = crop_img_pil
self.crop_params = crop_params
def save(self):
# save all the sample information in a pickle format
pickle.dump(self.ID, open('./Sample_set_simple/sample_ID','wb'))
pickle.dump(self.drops, open('./Sample_set_simple/drops','wb'))
pickle.dump(self.drop_IDs, open('./Sample_set_simple/drop_IDs','wb'))
pickle.dump(self.Number_of_drops, open('./Sample_set_simple/Number_of_drops','wb'))
pickle.dump(self.img_erode, open('./Sample_set_simple/img_erode','wb'))
pickle.dump(self.cv2_eroded, open('./Sample_set_simple/crop_image_erode','wb'))
pickle.dump(self.PIL_eroded, open('./Sample_set_simple/PIL_crop_image_erode','wb'))
pickle.dump(self.water, open('./Sample_set_simple/water','wb'))
pickle.dump(self.img_name, open('./Sample_set_simple/img_name','wb'))
pickle.dump(self.Notes, open('./Sample_set_simple/Notes','wb'))
pickle.dump(self.PTL, open('./Sample_set_simple/PTL','wb'))
pickle.dump(self.crop_params, open('./Sample_set_simple/crop_params','wb'))
print('Saved sample data')
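# Hedged example: reload one of the pickled fields later (paths match Sample.save above):
#     with open('./Sample_set_simple/drops', 'rb') as f:
#         drops = pickle.load(f)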
def point_ave(min_dis,pt_xy, verbose):
## Get rid of points that are too close to each other
### Find the points that are too close to each other
dis = np.empty(shape=(len(pt_xy),len(pt_xy)))
I=[]
J=[]
pt_x = np.array(pt_xy[:,0])
pt_y = np.array(pt_xy[:,1])
for i in range(len(pt_x)):
for j in range(len(pt_x)):
if j!=i:
dis[i,j]=np.sqrt((pt_x[i]-pt_x[j])**2 + (pt_y[i]-pt_y[j])**2)
if dis[i,j]<min_dis:
I = np.append(I,i)
J = np.append(J,j)
if j==i:
dis[i,j]=0
I = np.reshape(I,(len(I),1))
J = np.reshape(J,(len(J),1))
indices = np.concatenate((I,J),1)
# print("Indices = all poins that are too close to each other")
# print(indices)
# Find points that are all alone
alone = np.array(list(set(np.arange(0,len(pt_xy))) - set(np.unique(indices))))
# print('Alone')
# print(alone)
#print(indices)
    ### Get rid of repeated points (i.e., [1,2] == [2,1])
    #### Use the same procedure as above: turn each pair into a string for comparison, then turn it back into numbers
cl_pts = []
cl_1 = []
cl_2 = []
for i in range(len(I)):
cl_pt= str(indices[i][0].astype('uint8'))+str(indices[i][1].astype('uint8'))
cl_pt_rev = str(indices[i][1].astype('uint8'))+str(indices[i][0].astype('uint8'))
if cl_pt not in cl_pts and cl_pt_rev not in cl_pts:
cl_1 = np.append(cl_1, indices[i][0].astype('uint8'))
cl_2 = np.append(cl_2, indices[i][1].astype('uint8'))
cl_pt_arr = [cl_pt]
cl_pts = np.concatenate((cl_pts,cl_pt_arr),0)
# print('Cl_pts = repeats are removed')
# print(cl_pts)
cl_pts_num = np.empty((len(cl_pts),2))
for i in range(len(cl_pts)):
for j in range(2):
if j ==0:
cl_pts_num[i][j]=cl_1[i]
else:
cl_pts_num[i][j]=cl_2[i]
# print("cl_pts_num = cl_pts but as numbers")
# print(cl_pts_num)
### Average the two points that are too close
for i in range(len(cl_pts_num)):
pt1x = pt_xy[int(cl_pts_num[i][0])][0]
pt1y = pt_xy[int(cl_pts_num[i][0])][1]
pt2x = pt_xy[int(cl_pts_num[i][1])][0]
pt2y = pt_xy[int(cl_pts_num[i][1])][1]
ptx = (pt1x+pt2x)/2
pty = (pt1y+pt2y)/2
if i == 0:
cor = [[ptx,pty]]
else:
cor = np.concatenate((cor, [[ptx,pty]]),0)
### Create the final list of points
rep= []
tbd_2 = np.unique(cl_pts_num[:,1])
for i in range(len(cl_pts_num)):
if cl_pts_num[i][0] not in tbd_2:
if cl_pts_num[i][0] not in rep:
rep = np.concatenate([rep, [cl_pts_num[i][0]]])
if i == 0:
cor_f = [cor[i]]
else:
cor_f = np.concatenate([cor_f,[cor[i]]])
else:
cor_f = cor_f
else:
cor_f = cor_f
if len(alone)!=0:
if len(cl_pts_num)==0:
cor_f = pt_xy[alone]
else:
cor_f = np.concatenate([cor_f,pt_xy[alone]])
if len(cl_pts_num) ==0 and len(alone) == 0:
raise Exception("No Points where detected at all, please change parameters for better edge detection")
# print(cor_f)
return cl_pts_num,cor_f
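# Hedged example: merge corner candidates closer than 15 px (points illustrative);
# the first two points average to (102, 101) and the third survives unchanged:
#     pt_xy = np.array([[100.0, 100.0], [104.0, 102.0], [600.0, 90.0]])
#     pairs, corners = point_ave(min_dis=15, pt_xy=pt_xy, verbose=False)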
def Edge_detect(img_erode,crop_image,ap_size,k,lb,ub,c,block,ts,srn,stn,ang_min,k_del,k_edge,k_edge_er,verbose):
    # For more explanation of these parameters, see: https://docs.opencv.org/3.4/dd/d1a/group__imgproc__feature.html#ga8618180a5948286384e3b7ca02f6feeb
# linesP = cv2.HoughLinesP(edged, 1, np.pi / 180, ts, None, srn,stn)
########################################################################################################
ang_max = 180-ang_min
# Image Processes
## Copy the image to be warped
im = np.copy(crop_image)
im2= np.copy(crop_image)
## Image processing RGB>GRAY>threshold>edges
kernel1 = np.ones((k,k), np.uint8)
imer = cv2.dilate(im,kernel1)
imer = cv2.erode(imer,kernel1)
imgray = cv2.cvtColor(imer, cv2.COLOR_RGB2GRAY)
thresh = cv2.adaptiveThreshold(imgray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block, c)
#kernel = np.array([[0, -1, 0], [-1, 4.5, -1], [0, -1, 0]])
# Sharpen the image
#im_sharp = cv2.filter2D(imgray, -1, kernel)
#im_sharp = cv2.dilate(im_sharp,kernel1)
#ret, thresh1 = cv2.threshold(im_sharp, ta, 255, cv2.THRESH_BINARY)
edged = cv2.Canny(thresh, lb, ub,apertureSize=ap_size)
# print('np.unique(edged)')
# print(np.unique(edged))
# print('Size of segmented image and size of edged image')
# print(edged.shape)
# print(img_erode.shape)
# Get rid of droplets to remove noise from line transform
fig, ax = plt.subplots()
ax.imshow(img_erode)
plt.title('Droplet Segmentation')
if verbose:
plt.show()
plt.close()
    # Dilate img_erode by a lot, then remove those pixels from the edged image
kernel2 = np.ones((k_del,k_del), np.uint8)
img_del_drops = np.copy(img_erode)
img_del_drops = cv2.dilate(img_del_drops,kernel2)
# Remove droplet pixels from the edged image
edged[np.where(img_del_drops !=0)] = 0
    # Dilate the edged image to remove noise and close holes
kernel3 = np.ones((k_edge_er,k_edge_er), np.uint8)
kernel4 = np.ones((k_edge,k_edge), np.uint8)
#edged = cv2.erode(edged,kernel3)
edged = cv2.dilate(edged,kernel4)
edged = cv2.erode(edged,kernel3)
    ## Use the probabilistic Hough line transform to find the lines in the image that should correspond to the edges of the glass slide
linesP = cv2.HoughLinesP(edged, 1, np.pi / 180, ts, None, srn,stn)
## Initiate empty vectors to store the slopes and y-intercepts of all the detected lines
M = []
B = []
if linesP is not None:
        # For each line, find the end points and calculate the line's slope (m) and y-intercept (b)
for i in range(0, len(linesP)):
l = linesP[i][0]
cv2.line(im2, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv2.LINE_AA)
m = (l[3]-l[1])/(l[2]-l[0])
b = -m*l[0] + l[1]
M = np.append(M,[m])
B = np.append(B,[b])
    ## Define the figure for visual validation of the computer vision algorithm
# Check for lines that are too close to each other
vects=np.ones((2,len(M)))
vects[1,:] = M
# print('All Lines as vectors')
# print(vects)
# Find the angle between all the detected lines
ab_invcos = np.zeros((len(M),len(M)))
for i in range(len(M)):
for j in range(len(M)):
a = vects[:,i]
b = vects[:,j]
if i != j:
# print(np.dot(a,b)/(np.linalg.norm(a)*np.linalg.norm(b)))
inv = np.dot(a,b)/(np.linalg.norm(a)*np.linalg.norm(b))
if abs(inv)>1:
inv = math.copysign(1, inv)
ab_invcos[i,j] = math.acos(inv)
    # ab_invcos is an n x n matrix of the angles between all the detected lines
ab_invcos = ab_invcos*180/math.pi
# print('Angle Between all lines in Degrees')
# print(ab_invcos.astype(int))
# Use the results of Image Processing to find the corners of the glass slide
max_x = np.size(crop_image,1)
max_y = np.size(crop_image,0)
pts_x = np.empty(shape=(len(M),len(M)))
pts_y = np.empty(shape=(len(M),len(M)))
pt_x=[]
pt_y=[]
    ## Find all intersections between the lines and plot them
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
pts_x[i,j] = (B[j]-B[i])/(M[i]-M[j])
pts_y[i,j] = M[i]*pts_x[i,j] + B[i]
# ax[3].plot(pts_x[i,j],pts_y[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
# If the points are within the image, save them as potential corner points and plot them
else:
pts_x[i,j] = 0
pts_y[i,j] = 0
##############################################################################################
# print('pts_x and y')
# print(pts_x.astype(int))
# print(pts_y.astype(int))
    # Find lines that have an angle difference of less than ang_min degrees and intersect in the image
# find indices of lines that intersect
pts_xm = np.copy(pts_x)
pts_ym = np.copy(pts_y)
pts_xm[np.where(pts_xm<=0)] = 0
pts_ym[np.where(pts_ym<=0)] = 0
pts_xm[np.where(pts_xm>max_x)] = 0
pts_ym[np.where(pts_ym>max_y)] = 0
pt_xym = np.zeros((len(M),len(M)))
pts_xm[np.where(pts_ym == 0)] = 0
pts_ym[np.where(pts_xm == 0)] = 0
pt_xym = pts_xm+pts_ym
lines = np.where(pt_xym!=0)
lines = np.array(lines)
lines_sorted = np.sort(lines,0)
inter_index = np.unique(lines_sorted, axis = 1)
# print('inter_index')
# print(inter_index)
# Find lines that have an angle between them that is less than 10 degrees
# set every other angle to 1
angles = np.copy(ab_invcos)
# print('angles before changes')
# print(angles.astype(int))
angles1 = np.zeros_like(ab_invcos)
# print('np.where(angles>ang_min)')
# print(np.where(angles>ang_min))
# print(angles[np.where(angles>ang_min)])
zero_ind = np.array(np.where(angles>ang_min))
one_ind = np.array(np.where(angles<=ang_min))
refl = np.array(np.where(angles>=ang_max))
angles[zero_ind[0],zero_ind[1]] = 0
angles[one_ind[0],one_ind[1]] = 1
angles[refl[0],refl[1]] = 1
    # 1 for all pairs of lines closer than ang_min degrees
    # Find lines with an angle between them less than ang_min degrees and that intersect
# Lines that intersect
angles1[inter_index[0],inter_index[1]] = 1
# 1 for all pairs that intersect
# Sum them up and the places that are 2 are where both conditions are true
lines_TBD = angles+angles1
lines_tc = np.array(np.where(lines_TBD==2))
# print(f' 0 where >{ang_min} and 1 where <{ang_min} and >{ang_max}')
# print(angles.astype(int))
# print(' 0 where lines do not intersect and 1 where they do ')
# print(angles1.astype(int))
# print('Lines_TBD')
# print(lines_TBD.astype(int))
# print('lines_tc')
# print(lines_tc.astype(int))
# find points to plot
pts_xc = np.copy(pts_x)
pts_yc = np.copy(pts_y)
# print('pts_xc and yc after copying ')
# print(pts_xc.astype(int))
# print(pts_yc.astype(int))
fig, ax = plt.subplots(1)
ax.imshow(im)
ax.set_xlim(0,np.size(crop_image,1))
ax.set_ylim(0,np.size(crop_image,0))
ax.invert_yaxis()
plt.title('pts_xc and yc after copying ')
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
ax.plot(pts_x[i,j],pts_y[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
if verbose:
plt.show()
plt.close()
# make sure they are not too large
max_xtbd = np.array(np.where(pts_xc>=max_x))
max_ytbd = np.array(np.where(pts_yc>=max_y))
pts_xc[max_xtbd[0],max_xtbd[1]] = 0
pts_yc[max_ytbd[0],max_ytbd[1]] = 0
# print(f'Remove x > {max_x} and y >{max_y}')
# print(pts_xc.astype(int))
# print(pts_yc.astype(int))
fig, ax = plt.subplots(1)
ax.imshow(im)
ax.set_xlim(0,np.size(crop_image,1))
ax.set_ylim(0,np.size(crop_image,0))
ax.invert_yaxis()
plt.title(f'Remove x > {max_x} and y >{max_y}')
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
ax.plot(pts_xc[i,j],pts_yc[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
if verbose:
plt.show()
plt.close()
# make sure they are not too small
min_xtbd = np.array(np.where(pts_xc<=0.0))
min_ytbd = np.array(np.where(pts_yc<=0.0))
# print(min_xtbd)
# print(min_ytbd)
pts_xc[min_xtbd[0],min_xtbd[1]] = 0
pts_yc[min_ytbd[0],min_ytbd[1]] = 0
# print(f'Remove x and y <0')
# print(pts_xc)
# print(pts_yc)
fig, ax = plt.subplots(1)
ax.imshow(im)
ax.set_xlim(0,np.size(crop_image,1))
ax.set_ylim(0,np.size(crop_image,0))
ax.invert_yaxis()
plt.title(f'Remove x and y <0')
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
ax.plot(pts_xc[i,j],pts_yc[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
if verbose:
plt.show()
plt.close()
# Remove points of intersection for lines that are too close in angle
pts_xc[lines_tc[0],lines_tc[1]] = 0
pts_yc[lines_tc[0],lines_tc[1]] = 0
pts_xc[lines_tc[1],lines_tc[0]] = 0
pts_yc[lines_tc[1],lines_tc[0]] = 0
# print('Remove points for lines that are too close in angle')
# print(pts_xc.astype(int))
# print(pts_yc.astype(int))
fig, ax = plt.subplots(1)
ax.imshow(im)
ax.set_xlim(0,np.size(crop_image,1))
ax.set_ylim(0,np.size(crop_image,0))
ax.invert_yaxis()
plt.title('Remove points for lines that are too close in angle')
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
ax.plot(pts_xc[i,j],pts_yc[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
if verbose:
plt.show()
plt.close()
    # Get rid of points on the edges of the photo
edge_xtbd = np.array(np.where(pts_yc==0))
edge_ytbd = np.array(np.where(pts_xc==0))
pts_xc[edge_xtbd[0],edge_xtbd[1]] = 0
pts_yc[edge_ytbd[0],edge_ytbd[1]] = 0
# print('Removed points too close to the edge pts_xc and yc')
# print(pts_xc.astype(int))
# print(pts_yc.astype(int))
fig, ax = plt.subplots(1)
ax.imshow(im)
ax.set_xlim(0,np.size(crop_image,1))
ax.set_ylim(0,np.size(crop_image,0))
ax.invert_yaxis()
plt.title('Removed points too close to the edge pts_xc and yc')
for i in range(len(M)):
for j in range(len(M)):
# Between every line (ie line 1 and 3) find the x(pts_x) and y(pts_y) intercept of the lines
if j != i:
ax.plot(pts_xc[i,j],pts_yc[i,j], marker="o", markersize=1, markeredgecolor="red",markerfacecolor="red")
if verbose:
plt.show()
plt.close()
# find lines with > 3 intersections
# find the points that are farthest apart on that line
# Take that lines columns for x and y points
# calculate farthest points
# keep those points and delete other points
######################################################################
pts_xc_u = np.triu(pts_xc)
pts_yc_u = np.triu(pts_yc)
intersection_counts = np.zeros(len(M))
for i in range(len(M)):
for j in range(len(M)):
if i != j:
if pts_xc_u[i,j] !=0 and pts_yc_u[i,j]!=0:
intersection_counts[i] += 1
# Define a threshold for the number of intersections
intersection_threshold = 3
# print("Step 1")
# print(pts_xc_u.astype(int))
# print(pts_yc_u.astype(int))
# print(intersection_counts)
invalid_lines = np.where(intersection_counts >=intersection_threshold)[0]
for i in range(len(invalid_lines)):
row_x = np.copy(pts_xc[invalid_lines[i],:])
row_y = np.copy(pts_yc[invalid_lines[i],:])
x_points = row_x[np.where(row_x!=0)]
y_points = row_y[np.where(row_y!=0)]
# print('Points x and y')
# print(x_points)
# print(y_points)
#################################################################################################################
# create the final pt_x and pt_y array
f_x = np.array(np.where(pts_xc!=0))
f_y = np.array(np.where(pts_yc!=0))
pt_x = pts_x[f_x[0],f_x[1]]
pt_y = pts_y[f_y[0],f_y[1]]
# print('final pt_x and pt_y')
# print(pt_x)
# print(pt_y)
# Getting rid of middle lines
    # Idea: if one of the lines has three intersections with one point between the other two,
    # get rid of the line with the center point
# find lines with > 3 intersections