diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9db41db
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+__pycache__
+.DS_Store
+*.zip
+env
+model
+.ipynb_checkpoints
\ No newline at end of file
diff --git a/README.md b/README.md
index 4cc263f..ee84d9d 100644
--- a/README.md
+++ b/README.md
@@ -5,10 +5,20 @@
 Code and pretrained *Real-world* CNN model for:
 
 *Benedikt Lorch, Shruti Agarwal, Hany Farid. Forensic Reconstruction of Severely Degraded License Plates. Media Watermarking, Security, and Forensics 2019, Burlingame, CA, USA, MWSF-529.* [bibtex](http://cris.fau.de/bibtex/publication/209464175.bib)
 
+[Forensic Reconstruction of Severely Degraded License Plates - PDF](https://faui1-files.cs.fau.de/public/mmsec/license-plates/ei2019_forensic_reconstruction_of_severely_degraded_license_plates.pdf)
+
+[Can you enhance it? - Forensic Reconstruction of Severely Degraded License Plates - Slides - PDF](https://faui1-files.cs.fau.de/public/mmsec/license-plates/ei2019_forensic_reconstruction_of_severely_degraded_license_plates_slides.pdf)
+
 ## Getting Started
 
-Tested on Ubuntu 16.04 with Python 3.5, TensorFlow 1.4.0 and 1.10.1.
+Tested on macOS 10.14 with Python 3.7 and TensorFlow 2.1.0.
+
+
+### Requirements
+[Python 3](https://www.python.org/downloads/)
+
+[LaTeX 3](https://www.latex-project.org/get/)
 
 ### Installation
 
@@ -18,14 +28,34 @@
 git clone https://github.com/btlorch/license-plates.git
 cd license-plates
 ```
 
+Set up a Python virtual environment:
+```bash
+python3 -m pip install --user --upgrade pip
+python3 -m pip install --user virtualenv
+python3 -m venv env
+source env/bin/activate
+```
+
 Inside your virtual environment install required packages.
 ```bash
 pip install -r requirements.txt
 ```
 
 [Download trained model](https://faui1-files.cs.fau.de/public/mmsec/license-plates/license-plates-trained-model.zip) to `/model` or a directory of your choice.
+
+[Alternate link to trained model](https://cs.iusb.edu/~csolovey/license-plates/license-plates-trained-model.zip)
 ```bash
 wget https://faui1-files.cs.fau.de/public/mmsec/license-plates/license-plates-trained-model.zip
+```
+
+### File verification on trained model download
+```bash
+shasum -a 256 license-plates-trained-model.zip
+ ec2fad6f73e123e181780476b6e36b34228efde475fa1e053c45bd2a17234536 license-plates-trained-model.zip
+```
+
+### Unzip trained model to model directory
+```bash
 unzip license-plates-trained-model.zip -d model
 ```
diff --git a/requirements.txt b/requirements.txt
index ef4fa9a..ea48e72 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,14 +1,14 @@
-absl-py==0.4.1
+absl-py==0.7.0
 astor==0.7.1
 backcall==0.1.0
 bleach==2.1.4
 cloudpickle==0.5.5
 cycler==0.10.0
-dask==0.19.0
+dask==1.0.0
 decorator==4.3.0
 entrypoints==0.2.3
-gast==0.2.0
-grpcio==1.14.2
+gast==0.2.2
+grpcio==1.24.3
 h5py==2.8.0
 html5lib==1.0.1
 imageio==2.4.0
@@ -32,16 +32,15 @@ nbconvert==5.3.1
 nbformat==4.4.0
 networkx==2.1
 notebook==5.7.8
-numpy==1.14.5
+numpy==1.16.0
 pandocfilters==1.4.2
 parso==0.3.1
 pexpect==4.6.0
 pickleshare==0.7.4
 Pillow==5.2.0
-pkg-resources==0.0.0
 prometheus-client==0.3.1
 prompt-toolkit==1.0.15
-protobuf==3.6.1
+protobuf==3.8.0
 ptyprocess==0.6.0
 Pygments==2.2.0
 pyparsing==2.2.0
@@ -50,13 +49,13 @@ pytz==2018.5
 PyWavelets==1.0.0
 pyzmq==17.1.2
 qtconsole==4.4.1
-scikit-image==0.14.0
-scipy==1.1.0
+scikit-image==0.14.2
+scipy==1.4.1
 Send2Trash==1.5.0
 simplegeneric==0.8.1
-six==1.11.0
-tensorboard==1.10.0
-tensorflow==1.10.1
+six==1.12.0
+tensorboard==2.1.0
+tensorflow==2.1.0
 termcolor==1.1.0
 terminado==0.8.1
 testpath==0.3.1
diff --git a/src/demo.ipynb b/src/demo.ipynb
index f330c0f..f45c807 100644
--- a/src/demo.ipynb
+++ b/src/demo.ipynb
@@ -57,7 +57,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "sess = tf.InteractiveSession()\n",
+    "sess = tf.compat.v1.InteractiveSession()\n",
     "cnn = LicensePlatesCNN(sess, checkpoint_dir, summary_dir)\n",
     "cnn.load();"
    ]
diff --git a/src/eval.py b/src/eval.py
index c722295..bbd238c 100644
--- a/src/eval.py
+++ b/src/eval.py
@@ -9,7 +9,7 @@
 
 
 def eval(test_data, store_results_path, input_channels=3, checkpoint_dir="checkpoint", summary_dir="summary"):
-    with tf.Session() as sess:
+    with tf.compat.v1.Session() as sess:
         cnn = LicensePlatesCNN(sess=sess,
                                checkpoint_dir=checkpoint_dir,
                                summary_dir=summary_dir,
diff --git a/src/model.py b/src/model.py
index 382579e..af0e36a 100644
--- a/src/model.py
+++ b/src/model.py
@@ -6,6 +6,8 @@
 import h5py
 import time
 import os
 
+tf.compat.v1.disable_eager_execution()
+
 
 log = setup_custom_logger("LicensePlatesCNN")
@@ -106,11 +108,11 @@ def __init__(self,
 
     def _build_model(self):
         # Input 100x50 images
-        self._images = tf.placeholder(tf.float32, [None, 50, 100, self._input_channels], name="images")
+        self._images = tf.compat.v1.placeholder(tf.float32, [None, 50, 100, self._input_channels], name="images")
         # Placeholder for output labels of size batch_size x num_characters x num_distinct_characters (including null chararacter)
-        self._char_labels = tf.placeholder(tf.float32, [None, self._max_length, self._num_distinct_chars + 1], name="char_labels")
+        self._char_labels = tf.compat.v1.placeholder(tf.float32, [None, self._max_length, self._num_distinct_chars + 1], name="char_labels")
         # Dropout probability
-        self._drop_rate = tf.placeholder(tf.float32, name="drop_rate")
+        self._drop_rate = tf.compat.v1.placeholder(tf.float32, name="drop_rate")
         # Set up computational graph
         self._output, self._output_logits = self.model(self._images)
 
@@ -118,7 +120,7 @@ def _build_model(self):
         self._set_up_eval_vars_and_ops()
 
         # Saving only relative paths is particularly useful when we copy a saved model to another machine
-        self._saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
+        self._saver = tf.compat.v1.train.Saver(max_to_keep=1, save_relative_paths=True)
 
     def _set_up_eval_vars_and_ops(self):
         """
@@ -130,17 +132,17 @@ def _set_up_eval_vars_and_ops(self):
         # Since we know the maximum length of the license numbers in advance, we can set up one tf.nn.in_top_k method for each character and stack the results
         char_top_k_accuracies = []
         for i in range(self._max_length):
-            with tf.variable_scope("char_{:d}".format(i)):
-                class_ids_vector = tf.argmax(self._char_labels[:, i, :], axis=1)
+            with tf.compat.v1.variable_scope("char_{:d}".format(i)):
+                class_ids_vector = tf.argmax(input=self._char_labels[:, i, :], axis=1)
                 top_k_accuracy = tf.nn.in_top_k(predictions=self._output[:, i, :], targets=class_ids_vector, k=self._report_accuracy_top_k)
                 char_top_k_accuracies.append(top_k_accuracy)
 
         # Stack per-character results and average over all characters for each sample
-        char_top_k_accuracies = tf.reduce_mean(tf.cast(tf.stack(char_top_k_accuracies, axis=1), tf.float32), axis=1)
+        char_top_k_accuracies = tf.reduce_mean(input_tensor=tf.cast(tf.stack(char_top_k_accuracies, axis=1), tf.float32), axis=1)
 
         # Per character top k accuracy for each sample
         self._char_top_k_accuracies_samplewise = char_top_k_accuracies
-        self._char_top_k_accuracies_mean = tf.reduce_mean(char_top_k_accuracies)
+        self._char_top_k_accuracies_mean = tf.reduce_mean(input_tensor=char_top_k_accuracies)
 
     def model(self, input):
         """
@@ -150,14 +152,14 @@ def model(self, input):
         """
 
         # (50, 100, 1) -> (50, 100, 64)
-        with tf.variable_scope("conv0"):
+        with tf.compat.v1.variable_scope("conv0"):
             conv0_weights = weights_variable_xavier([3, 3, self._input_channels, 64], name=CONV0_WEIGHTS)
             conv0_bias = bias_variable([64], value=0.1, name=CONV0_BIAS)
             conv0_z = conv2d(input, conv0_weights) + conv0_bias
             conv0_a = tf.nn.relu(conv0_z)
 
         # (50, 100, 64) -> (50, 100, 64)
-        with tf.variable_scope("conv1"):
+        with tf.compat.v1.variable_scope("conv1"):
             conv1_weights = weights_variable_xavier([3, 3, 64, 64], name=CONV1_WEIGHTS)
             conv1_bias = bias_variable([64], value=0.1, name=CONV1_BIAS)
             conv1_z = conv2d(conv0_a, conv1_weights) + conv1_bias
@@ -165,121 +167,121 @@ def model(self, input):
 
         # (50, 100, 64) -> (25, 50, 64)
         # TODO does variable_scope make sense if pooling layers don't even have variables?
-        with tf.variable_scope("pool0"):
-            pool0 = tf.layers.max_pooling2d(conv1_a, pool_size=[2, 2], strides=2, padding="same")
+        with tf.compat.v1.variable_scope("pool0"):
+            pool0 = tf.compat.v1.layers.max_pooling2d(conv1_a, pool_size=[2, 2], strides=2, padding="same")
 
         # (25, 50, 64) -> (25, 50, 128)
-        with tf.variable_scope("conv2"):
+        with tf.compat.v1.variable_scope("conv2"):
             conv2_weights = weights_variable_xavier([3, 3, 64, 128], name=CONV2_WEIGHTS)
             conv2_bias = bias_variable([128], value=0.1, name=CONV2_BIAS)
             conv2_z = conv2d(pool0, conv2_weights) + conv2_bias
             conv2_a = tf.nn.relu(conv2_z)
 
         # (25, 50, 128) -> (25, 50, 128)
-        with tf.variable_scope("conv3"):
+        with tf.compat.v1.variable_scope("conv3"):
             conv3_weights = weights_variable_xavier([3, 3, 128, 128], name=CONV3_WEIGHTS)
             conv3_bias = bias_variable([128], value=0.1, name=CONV3_BIAS)
             conv3_z = conv2d(conv2_a, conv3_weights) + conv3_bias
             conv3_a = tf.nn.relu(conv3_z)
 
         # (25, 50, 128) -> (25, 50, 128)
-        with tf.variable_scope("pool1"):
-            pool1 = tf.layers.max_pooling2d(conv3_a, pool_size=[2, 2], strides=1, padding="same")
+        with tf.compat.v1.variable_scope("pool1"):
+            pool1 = tf.compat.v1.layers.max_pooling2d(conv3_a, pool_size=[2, 2], strides=1, padding="same")
 
         # (25, 50, 128) -> (25, 50, 256)
-        with tf.variable_scope("conv4"):
+        with tf.compat.v1.variable_scope("conv4"):
             conv4_weights = weights_variable_xavier([3, 3, 128, 256], name=CONV4_WEIGHTS)
             conv4_bias = bias_variable([256], value=0.1, name=CONV4_BIAS)
             conv4_z = conv2d(pool1, conv4_weights) + conv4_bias
             conv4_a = tf.nn.relu(conv4_z)
 
         # (25, 50, 256) -> (25, 50, 256)
-        with tf.variable_scope("conv5"):
+        with tf.compat.v1.variable_scope("conv5"):
             conv5_weights = weights_variable_xavier([3, 3, 256, 256], name=CONV5_WEIGHTS)
             conv5_bias = bias_variable([256], value=0.1, name=CONV5_BIAS)
             conv5_z = conv2d(conv4_a, conv5_weights) + conv5_bias
             conv5_a = tf.nn.relu(conv5_z)
 
         # (25, 50, 256) -> (13, 25, 256)
-        with tf.variable_scope("pool2"):
-            pool2 = tf.layers.max_pooling2d(conv5_a, pool_size=[2, 2], strides=2, padding="same")
+        with tf.compat.v1.variable_scope("pool2"):
+            pool2 = tf.compat.v1.layers.max_pooling2d(conv5_a, pool_size=[2, 2], strides=2, padding="same")
 
         # (13, 25, 256) -> (13, 25, 512)
-        with tf.variable_scope("conv6"):
+        with tf.compat.v1.variable_scope("conv6"):
             conv6_weights = weights_variable_xavier([3, 3, 256, 512], name=CONV6_WEIGHTS)
             conv6_bias = bias_variable([512], value=0.1, name=CONV6_BIAS)
             conv6_z = conv2d(pool2, conv6_weights) + conv6_bias
             conv6_a = tf.nn.relu(conv6_z)
 
         # (13, 25, 512) -> (13, 25, 512)
-        with tf.variable_scope("pool3"):
-            pool3 = tf.layers.max_pooling2d(conv6_a, pool_size=[2, 2], strides=1, padding="same")
+        with tf.compat.v1.variable_scope("pool3"):
+            pool3 = tf.compat.v1.layers.max_pooling2d(conv6_a, pool_size=[2, 2], strides=1, padding="same")
 
         # (13, 25, 512) -> (13, 25, 512)
-        with tf.variable_scope("conv7"):
+        with tf.compat.v1.variable_scope("conv7"):
             conv7_weights = weights_variable_xavier([3, 3, 512, 512], name=CONV7_WEIGHTS)
             conv7_bias = bias_variable([512], value=0.1, name=CONV7_BIAS)
             conv7_z = conv2d(pool3, conv7_weights) + conv7_bias
             conv7_a = tf.nn.relu(conv7_z)
 
         # (13, 25, 512) -> (7, 13, 512)
-        with tf.variable_scope("pool4"):
-            pool4 = tf.layers.max_pooling2d(conv7_a, pool_size=[2, 2], strides=2, padding="same")
+        with tf.compat.v1.variable_scope("pool4"):
+            pool4 = tf.compat.v1.layers.max_pooling2d(conv7_a, pool_size=[2, 2], strides=2, padding="same")
 
         flatten = tf.reshape(pool4, [-1, 7 * 13 * 512])
 
-        with tf.variable_scope("fc0"):
+        with tf.compat.v1.variable_scope("fc0"):
             fc0_weights = weights_variable_truncated_normal([7 * 13 * 512, 1024], stddev=0.005, name=FC0_WEIGHTS)
             fc0_bias = bias_variable([1024], value=0.1, name=FC0_BIAS)
             fc0_z = tf.matmul(flatten, fc0_weights) + fc0_bias
             fc0_a = tf.nn.relu(fc0_z)
 
-        dropout_0 = tf.layers.dropout(fc0_a, rate=self._drop_rate)
+        dropout_0 = tf.compat.v1.layers.dropout(fc0_a, rate=self._drop_rate)
 
-        with tf.variable_scope("fc1"):
+        with tf.compat.v1.variable_scope("fc1"):
             fc1_weights = weights_variable_truncated_normal([1024, 2048], stddev=0.005, name=FC1_WEIGHTS)
             fc1_bias = bias_variable([2048], value=0.1, name=FC1_BIAS)
             fc1_z = tf.matmul(dropout_0, fc1_weights) + fc1_bias
             fc1_a = tf.nn.relu(fc1_z)
 
-        dropout_1 = tf.layers.dropout(fc1_a, rate=self._drop_rate)
+        dropout_1 = tf.compat.v1.layers.dropout(fc1_a, rate=self._drop_rate)
 
         # Output layers
-        with tf.variable_scope("char0"):
+        with tf.compat.v1.variable_scope("char0"):
             char0_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR0_WEIGHTS)
             char0_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR0_BIAS)
             char0_logits = tf.matmul(dropout_1, char0_weights) + char0_bias
             char0_out = tf.nn.softmax(char0_logits)
 
-        with tf.variable_scope("char1"):
+        with tf.compat.v1.variable_scope("char1"):
             char1_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR1_WEIGHTS)
             char1_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR1_BIAS)
             char1_logits = tf.matmul(dropout_1, char1_weights) + char1_bias
             char1_out = tf.nn.softmax(char1_logits)
 
-        with tf.variable_scope("char2"):
+        with tf.compat.v1.variable_scope("char2"):
             char2_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR2_WEIGHTS)
             char2_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR2_BIAS)
             char2_logits = tf.matmul(dropout_1, char2_weights) + char2_bias
             char2_out = tf.nn.softmax(char2_logits)
 
-        with tf.variable_scope("char3"):
+        with tf.compat.v1.variable_scope("char3"):
             char3_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR3_WEIGHTS)
             char3_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR3_BIAS)
             char3_logits = tf.matmul(dropout_1, char3_weights) + char3_bias
             char3_out = tf.nn.softmax(char3_logits)
 
-        with tf.variable_scope("char4"):
+        with tf.compat.v1.variable_scope("char4"):
             char4_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR4_WEIGHTS)
             char4_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR4_BIAS)
             char4_logits = tf.matmul(dropout_1, char4_weights) + char4_bias
             char4_out = tf.nn.softmax(char4_logits)
 
-        with tf.variable_scope("char5"):
+        with tf.compat.v1.variable_scope("char5"):
             char5_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR5_WEIGHTS)
             char5_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR5_BIAS)
             char5_logits = tf.matmul(dropout_1, char5_weights) + char5_bias
             char5_out = tf.nn.softmax(char5_logits)
 
-        with tf.variable_scope("char6"):
+        with tf.compat.v1.variable_scope("char6"):
             char6_weights = weights_variable_xavier([2048, self._num_distinct_chars + 1], name=FC_CHAR6_WEIGHTS)
             char6_bias = bias_variable([self._num_distinct_chars + 1], name=FC_CHAR6_BIAS)
             char6_logits = tf.matmul(dropout_1, char6_weights) + char6_bias
@@ -358,15 +360,15 @@ def variable_summaries(self, var_name):
         """
 
         var_variable = self._weight_vars[var_name]
-        with tf.name_scope("{}_summary".format(var_name)):
-            mean = tf.reduce_mean(var_variable)
-            mean_summary = tf.summary.scalar("mean", mean)
-            with tf.name_scope("stddev"):
-                stddev = tf.sqrt(tf.reduce_mean(tf.square(var_variable - mean)))
-            stddev_summary = tf.summary.scalar("stddev", stddev)
-            max_summary = tf.summary.scalar("max", tf.reduce_max(var_variable))
-            min_summary = tf.summary.scalar("min", tf.reduce_min(var_variable))
-            histogram_summary = tf.summary.histogram("histogram", var_variable)
+        with tf.compat.v1.name_scope("{}_summary".format(var_name)):
+            mean = tf.reduce_mean(input_tensor=var_variable)
+            mean_summary = tf.compat.v1.summary.scalar("mean", mean)
+            with tf.compat.v1.name_scope("stddev"):
+                stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var_variable - mean)))
+            stddev_summary = tf.compat.v1.summary.scalar("stddev", stddev)
+            max_summary = tf.compat.v1.summary.scalar("max", tf.reduce_max(input_tensor=var_variable))
+            min_summary = tf.compat.v1.summary.scalar("min", tf.reduce_min(input_tensor=var_variable))
+            histogram_summary = tf.compat.v1.summary.histogram("histogram", var_variable)
 
         return [mean_summary, stddev_summary, max_summary, min_summary, histogram_summary]
 
@@ -390,50 +392,50 @@ def train(self, path_to_training_set, path_to_validation_set):
         # Divide into batches
         batched_dataset = dataset.batch(self._training_batch_size)
-        iterator = batched_dataset.make_initializable_iterator()
+        iterator = tf.compat.v1.data.make_initializable_iterator(batched_dataset)
         next_element_op = iterator.get_next()
 
         # Fetch the output units
         char_logits = self._output_logits
-        with tf.name_scope("loss"):
-            char0_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 0], logits=char_logits[:, 0, :]))
-            char1_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 1], logits=char_logits[:, 1, :]))
-            char2_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 2], logits=char_logits[:, 2, :]))
-            char3_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 3], logits=char_logits[:, 3, :]))
-            char4_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 4], logits=char_logits[:, 4, :]))
-            char5_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 5], logits=char_logits[:, 5, :]))
-            char6_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self._char_labels[:, 6], logits=char_logits[:, 6, :]))
+        with tf.compat.v1.name_scope("loss"):
+            char0_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 0]), logits=char_logits[:, 0, :]))
+            char1_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 1]), logits=char_logits[:, 1, :]))
+            char2_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 2]), logits=char_logits[:, 2, :]))
+            char3_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 3]), logits=char_logits[:, 3, :]))
+            char4_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 4]), logits=char_logits[:, 4, :]))
+            char5_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 5]), logits=char_logits[:, 5, :]))
+            char6_cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(labels=tf.stop_gradient(self._char_labels[:, 6]), logits=char_logits[:, 6, :]))
             loss = char0_cross_entropy + char1_cross_entropy + char2_cross_entropy + char3_cross_entropy + char4_cross_entropy + char5_cross_entropy + char6_cross_entropy
 
         # Set up optimizer
-        with tf.name_scope("optimizer"):
+        with tf.compat.v1.name_scope("optimizer"):
             global_step = tf.Variable(0, name="global_step", trainable=False)
-            learning_rate = tf.train.exponential_decay(1e-2, global_step=global_step, decay_steps=num_batches, decay_rate=0.9, staircase=True)
-            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+            learning_rate = tf.compat.v1.train.exponential_decay(1e-2, global_step=global_step, decay_steps=num_batches, decay_rate=0.9, staircase=True)
+            optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
             train_op = optimizer.minimize(loss, global_step=global_step)
 
         # Summary
-        loss_summary = tf.summary.scalar("loss", loss)
-        learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)
+        loss_summary = tf.compat.v1.summary.scalar("loss", loss)
+        learning_rate_summary = tf.compat.v1.summary.scalar("learning_rate", learning_rate)
 
         # Show statistics for each of our weight variables on TensorBoard
         training_summary_tensors = []
         for var_name in self._weight_vars.keys():
             summary_tensors = self.variable_summaries(var_name)
             training_summary_tensors.extend(summary_tensors)
-        summary_op = tf.summary.merge([loss_summary, learning_rate_summary] + training_summary_tensors)
+        summary_op = tf.compat.v1.summary.merge([loss_summary, learning_rate_summary] + training_summary_tensors)
 
         # Prepare paths for summaries
         summary_dir_name = time.strftime("%Y_%m_%d_%H_%M_%S") + "-" + self._model_name
         training_summary_dir = os.path.join(self._summary_dir, summary_dir_name, "training")
         validation_summary_dir = os.path.join(self._summary_dir, summary_dir_name, "validation")
 
         # Create two different summary writers to give statistics on training and validation images
-        training_summary_writer = tf.summary.FileWriter(training_summary_dir, graph=self._sess.graph)
+        training_summary_writer = tf.compat.v1.summary.FileWriter(training_summary_dir, graph=self._sess.graph)
         # Set up evaluation summary writer without graph to avoid overlap with training graph
-        self._eval_summary_writer = tf.summary.FileWriter(validation_summary_dir)
+        self._eval_summary_writer = tf.compat.v1.summary.FileWriter(validation_summary_dir)
 
         # Start the training
-        self._sess.run(tf.global_variables_initializer())
+        self._sess.run(tf.compat.v1.global_variables_initializer())
 
         # Restore model checkpoint
         if self.load():
@@ -518,7 +520,7 @@ def evaluate(self, path_to_dataset, global_step=0, batch_size=128, store_results
         # Divide data set into batches
         batched_dataset = dataset.batch(batch_size)
-        iterator = batched_dataset.make_initializable_iterator()
+        iterator = tf.compat.v1.data.make_initializable_iterator(batched_dataset)
         next_element_op = iterator.get_next()
 
         # Set up writer to store predictions
@@ -564,7 +566,7 @@
         except tf.errors.OutOfRangeError:
             top_k_accuracy = np.mean(np.concatenate(top_k_accuracies))
 
-            summary = tf.Summary()
+            summary = tf.compat.v1.Summary()
             summary.value.add(tag="accuracy", simple_value=top_k_accuracy)
 
             # Write evaluation summary if evaluation summary writer was set up (which it is during training)
diff --git a/src/ops.py b/src/ops.py
index dcbc66c..48edeb8 100644
--- a/src/ops.py
+++ b/src/ops.py
@@ -5,14 +5,14 @@ def conv2d(x, W):
     """
     Returns a 2D convolutional layer with full stride.
     """
-    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
+    return tf.nn.conv2d(input=x, filters=W, strides=[1, 1, 1, 1], padding="SAME")
 
 
 def max_pool_2x2(x):
     """
     Max-pooling over 2x2 blocks
     """
-    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
+    return tf.nn.max_pool2d(input=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
 
 
 def weights_variable_truncated_normal(shape, mean=0.0, stddev=0.1, name=None, trainable=True):
@@ -28,7 +28,7 @@ def weights_variable_truncated_normal(shape, mean=0.0, stddev=0.1, name=None, tr
                       the default list of variables to use by the `Optimizer` classes.
     :return: Weight matrix
     """
-    initial = tf.truncated_normal(shape, mean=mean, stddev=stddev)
+    initial = tf.random.truncated_normal(shape, mean=mean, stddev=stddev)
     return tf.Variable(initial, trainable=trainable, name=name)
 
 
@@ -43,7 +43,7 @@ def weights_variable_xavier(shape, name, trainable=True):
                       the default list of variables to use by the `Optimizer` classes.
     :return: Weight matrix
     """
-    return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer(), trainable=trainable)
+    return tf.compat.v1.get_variable(name, shape=shape, initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"), trainable=trainable)
 
 
 def bias_variable(shape, value=0, name=None, trainable=True):
diff --git a/src/train.py b/src/train.py
index 5487e87..fc56c92 100644
--- a/src/train.py
+++ b/src/train.py
@@ -12,7 +12,7 @@
 parser.add_argument("--summary_dir", type=str, help="Directory where to store summary", default="summary")
 args = vars(parser.parse_args())
 
-with tf.Session() as sess:
+with tf.compat.v1.Session() as sess:
     cnn = LicensePlatesCNN(sess,
                            args["checkpoint_dir"],
                            args["summary_dir"],