1
+ import matplotlib .pyplot as plt
1
2
import numpy as np
3
+ import numpy .ma as ma
2
4
import tensorflow as tf
3
5
from tensorflow .keras import Input
4
- from tensorflow .keras .layers import (
5
- Activation ,
6
- BatchNormalization ,
7
- Conv2D ,
8
- Conv2DTranspose ,
9
- Dense ,
10
- Flatten ,
11
- LeakyReLU ,
12
- Reshape ,
13
- )
6
+ from tensorflow .keras .layers import (Activation , BatchNormalization , Conv2D ,
7
+ Conv2DTranspose , Dense , Flatten ,
8
+ LeakyReLU , Reshape )
14
9
from tensorflow .keras .models import Model
15
10
from tensorflow .keras .optimizers import Adam
11
+
16
12
from models import BaseModel
13
+ from utils .plots import *
17
14
18
15
19
class ConvolutionalAutoencoderModel(BaseModel):
    """Convolutional autoencoder built on the project's BaseModel scaffolding.

    Encodes input images through strided Conv2D layers and reconstructs them
    with Conv2DTranspose layers (see create_model elsewhere in this class).
    """

    def __init__(self, config):
        # All shared setup (config storage, input/output naming, etc.) is
        # handled by BaseModel; this subclass adds nothing at construction.
        super().__init__(config)
22
19
23
20
def create_optimizer (self , optimzer = "adam" ):
24
21
super ().create_optimizer (optimzer )
25
22
26
- def create_model (self , filters = (32 , 64 ), latent_dim = 16 ):
23
+ def compile (self , loss = "mse" ):
24
+ self .model .compile (loss = loss , optimizer = self .optimizer , metrics = ["accuracy" ])
25
+
26
+ def create_model (self ):
27
+ filters = (32 , 64 )
28
+ kernel_size = (3 ,3 )
29
+ try :
30
+ model_config = self .config .train .raw ["convolutional_autoencoder_model" ]
31
+ latent_dim = model_config ["latent_dim" ]
32
+ except :
33
+ latent_dim = 16
34
+
35
+
27
36
input_shape = self .config .input_shape
28
37
inputs = Input (shape = input_shape , name = self .input_name )
29
38
x = inputs
30
39
for f in filters :
31
- x = Conv2D (filters = f , kernel_size = ( 3 , 3 ) , strides = 2 , padding = "same" )(x )
40
+ x = Conv2D (filters = f , kernel_size = kernel_size , strides = 2 , padding = "same" )(x )
32
41
x = LeakyReLU (alpha = 0.2 )(x )
33
42
x = BatchNormalization (axis = input_shape [2 ])(x )
34
43
volume_size = tf .keras .backend .int_shape (x )
@@ -41,11 +50,11 @@ def create_model(self, filters=(32, 64), latent_dim=16):
41
50
x = Reshape ((volume_size [1 ], volume_size [2 ], volume_size [3 ]))(x )
42
51
for f in filters [::- 1 ]:
43
52
x = Conv2DTranspose (
44
- filters = f , kernel_size = ( 3 , 3 ) , strides = 2 , padding = "same"
53
+ filters = f , kernel_size = kernel_size , strides = 2 , padding = "same"
45
54
)(x )
46
55
x = LeakyReLU (alpha = 0.2 )(x )
47
56
x = BatchNormalization (axis = input_shape [2 ])(x )
48
- x = Conv2DTranspose (filters = input_shape [2 ], kernel_size = ( 3 , 3 ) , padding = "same" )(
57
+ x = Conv2DTranspose (filters = input_shape [2 ], kernel_size = kernel_size , padding = "same" )(
49
58
x
50
59
)
51
60
outputs = Activation ("sigmoid" , name = self .output_name )(x ) # Decoded
@@ -54,9 +63,5 @@ def create_model(self, filters=(32, 64), latent_dim=16):
54
63
self .model = Model (inputs , decoder (encoder (inputs )), name = "autoencoder" )
55
64
return self .model
56
65
57
- def overwrite_optimizer (self , optimizer , optimizer_name ):
58
- self .optimzer = optimizer
59
- self .optimizer_name = optimizer_name
60
-
61
- def compile (self , loss = "mse" ):
62
- self .model .compile (loss = loss , optimizer = self .optimizer , metrics = ["accuracy" ])
66
+ def plot_predictions (self , test_images ):
67
+ plot_difference (self .config , self .predictions , test_images )
0 commit comments