Update week 3 assignments
dimitry-ishenko committed Mar 28, 2020
1 parent 2578ab3 commit 67ca75a
Showing 2 changed files with 39 additions and 60 deletions.
58 changes: 24 additions & 34 deletions week3/week3_task1_first_cnn_cifar10_clean.ipynb
@@ -1,15 +1,5 @@
{
"cells": [
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"# set tf 1.x for colab\n",
-"%tensorflow_version 1.x"
-]
-},
{
"cell_type": "markdown",
"metadata": {
@@ -90,8 +80,8 @@
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import keras\n",
"from keras import backend as K\n",
"from tensorflow import keras\n",
"from tensorflow.keras import backend as K\n",
"import numpy as np\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
@@ -149,7 +139,7 @@
},
"outputs": [],
"source": [
"from keras.datasets import cifar10\n",
"from tensorflow.keras.datasets import cifar10\n",
"(x_train, y_train), (x_test, y_test) = cifar10.load_data()"
]
},
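For orientation, `cifar10.load_data()` returns 50,000 training and 10,000 test 32×32 RGB images with integer labels of shape `(n, 1)`. A quick sanity check (illustrative, not part of the commit):

```python
# CIFAR-10 shapes returned by tf.keras's load_data()
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
assert x_train.shape == (50000, 32, 32, 3) and y_train.shape == (50000, 1)
assert x_test.shape == (10000, 32, 32, 3) and y_test.shape == (10000, 1)
```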
@@ -265,28 +255,27 @@
"outputs": [],
"source": [
"# import necessary building blocks\n",
"from keras.models import Sequential\n",
"from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout\n",
"from keras.layers.advanced_activations import LeakyReLU"
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Softmax, LeakyReLU, Dropout"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Convolutional networks are built from several types of layers:\n",
"- [Conv2D](https://keras.io/layers/convolutional/#conv2d) - performs convolution:\n",
"- [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) - performs convolution:\n",
" - **filters**: number of output channels; \n",
" - **kernel_size**: an integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window;\n",
" - **padding**: padding=\"same\" adds zero padding to the input, so that the output has the same width and height, padding='valid' performs convolution only in locations where kernel and the input fully overlap;\n",
" - **activation**: \"relu\", \"tanh\", etc.\n",
" - **input_shape**: shape of input.\n",
"- [MaxPooling2D](https://keras.io/layers/pooling/#maxpooling2d) - performs 2D max pooling.\n",
"- [Flatten](https://keras.io/layers/core/#flatten) - flattens the input, does not affect the batch size.\n",
"- [Dense](https://keras.io/layers/core/#dense) - fully-connected layer.\n",
"- [Activation](https://keras.io/layers/core/#activation) - applies an activation function.\n",
"- [LeakyReLU](https://keras.io/layers/advanced-activations/#leakyrelu) - applies leaky relu activation.\n",
"- [Dropout](https://keras.io/layers/core/#dropout) - applies dropout."
"- [MaxPool2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D) - performs 2D max pooling.\n",
"- [Flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) - flattens the input, does not affect the batch size.\n",
"- [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) - fully-connected layer.\n",
"- [Softmax](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Softmax) - applies softmax activation function.\n",
"- [LeakyReLU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LeakyReLU) - applies leaky relu activation.\n",
"- [Dropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout) - applies dropout."
]
},
{
@@ -309,7 +298,7 @@
"... # here comes a bunch of convolutional, pooling and dropout layers\n",
"\n",
"model.add(Dense(NUM_CLASSES)) # the last layer with neuron for each class\n",
"model.add(Activation(\"softmax\")) # output probabilities\n",
"model.add(Softmax()) # output probabilities\n",
"```\n",
"\n",
"Stack __4__ convolutional layers with kernel size __(3, 3)__ with growing number of filters __(16, 32, 32, 64)__, use \"same\" padding.\n",
@@ -361,7 +350,7 @@
"outputs": [],
"source": [
"# describe model\n",
"s = reset_tf_session() # clear default graph\n",
"K.clear_session() # clear default graph\n",
"model = make_model()\n",
"model.summary()"
]
@@ -420,14 +409,14 @@
"BATCH_SIZE = 32\n",
"EPOCHS = 10\n",
"\n",
"s = reset_tf_session() # clear default graph\n",
"K.clear_session() # clear default graph\n",
"# don't call K.set_learning_phase() !!! (otherwise will enable dropout in train/test simultaneously)\n",
"model = make_model() # define our model\n",
"\n",
"# prepare model for fitting (loss, optimizer, etc)\n",
"model.compile(\n",
" loss='categorical_crossentropy', # we train 10-way classification\n",
" optimizer=keras.optimizers.adamax(lr=INIT_LR), # for SGD\n",
" optimizer=keras.optimizers.Adamax(lr=INIT_LR), # for SGD\n",
" metrics=['accuracy'] # report accuracy during training\n",
")\n",
"\n",
@@ -460,8 +449,8 @@
"\n",
"#### uncomment below to continue training from model checkpoint\n",
"#### fill `last_finished_epoch` with your latest finished epoch\n",
-"# from keras.models import load_model\n",
-"# s = reset_tf_session()\n",
+"# from tensorflow.keras.models import load_model\n",
+"# K.clear_session()\n",
"# last_finished_epoch = 7\n",
"# model = load_model(model_filename.format(last_finished_epoch))"
]
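The checkpoints that `load_model(model_filename.format(last_finished_epoch))` resumes from are presumably written by a callback during training; that callback sits outside this hunk. A sketch of one way to produce them, with the filename pattern hypothetical:

```python
# illustrative: write a full-model checkpoint after every epoch so that
# load_model() can resume training from any finished epoch
from tensorflow.keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(
    "model.{epoch:03d}.hdf5",  # hypothetical pattern; the notebook's model_filename is not shown
    save_weights_only=False,   # save the whole model, as load_model() expects
)
# model.fit(..., callbacks=[checkpoint])
```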
@@ -653,6 +642,7 @@
"outputs": [],
"source": [
"s = reset_tf_session() # clear default graph\n",
"tf.compat.v1.disable_eager_execution()\n",
"K.set_learning_phase(0) # disable dropout\n",
"model = make_model()\n",
"model.load_weights(\"weights.h5\") # that were saved after model.fit"
@@ -700,10 +690,10 @@
"\n",
" # this is the placeholder for the input image\n",
" input_img = model.input\n",
" img_width, img_height = input_img.shape.as_list()[1:3]\n",
" img_width, img_height = input_img.shape[1:3]\n",
" \n",
" # find the layer output by name\n",
" layer_output = list(filter(lambda x: x.name == layer_name, model.layers))[0].output\n",
" layer_output = model.get_layer(name=layer_name).output\n",
"\n",
" # we build a loss function that maximizes the activation\n",
" # of the filter_index filter of the layer considered\n",
@@ -755,7 +745,7 @@
" cols = 8\n",
" rows = 2\n",
" filter_index = 0\n",
" max_filter_index = list(filter(lambda x: x.name == layer_name, model.layers))[0].output.shape.as_list()[-1] - 1\n",
" max_filter_index = model.get_layer(name=layer_name).output.shape[-1] - 1\n",
" fig = plt.figure(figsize=(2 * cols - 1, 3 * rows - 1))\n",
" for i in range(cols):\n",
" for j in range(rows):\n",
@@ -789,7 +779,7 @@
"conv_activation_layers = []\n",
"for layer in model.layers:\n",
" if isinstance(layer, LeakyReLU):\n",
" prev_layer = layer.inbound_nodes[0].inbound_layers[0]\n",
" prev_layer = layer.inbound_nodes[0].inbound_layers\n",
" if isinstance(prev_layer, Conv2D):\n",
" conv_activation_layers.append(layer)\n",
"\n",
@@ -914,4 +904,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
41 changes: 15 additions & 26 deletions week3/week3_task2_fine_tuning_clean.ipynb
@@ -1,15 +1,5 @@
{
"cells": [
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": [
-"# set tf 1.x for colab\n",
-"%tensorflow_version 1.x"
-]
-},
{
"cell_type": "markdown",
"metadata": {},
@@ -82,8 +72,8 @@
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import keras\n",
"from keras import backend as K\n",
"from tensorflow import keras\n",
"from tensorflow.keras import backend as K\n",
"import numpy as np\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
@@ -94,8 +84,7 @@
"import scipy.io\n",
"import os\n",
"import tarfile\n",
"import keras_utils\n",
"from keras_utils import reset_tf_session "
"import keras_utils"
]
},
{
@@ -494,7 +483,7 @@
" # stack images into 4D tensor [batch_size, img_size, img_size, 3]\n",
" batch_imgs = np.stack(batch_imgs, axis=0)\n",
" # convert targets into 2D tensor [batch_size, num_classes]\n",
" batch_targets = keras.utils.np_utils.to_categorical(batch_targets, N_CLASSES)\n",
" batch_targets = keras.utils.to_categorical(batch_targets, N_CLASSES)\n",
" yield batch_imgs, batch_targets"
]
},
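`np_utils` is a standalone-Keras module path; in tf.keras the helper lives directly under `keras.utils`. Either way it one-hot encodes integer labels:

```python
# one-hot encoding integer class labels (the tf.keras path used here)
from tensorflow import keras

one_hot = keras.utils.to_categorical([0, 2, 1], num_classes=3)
# -> [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]
```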
@@ -553,7 +542,7 @@
"outputs": [],
"source": [
"# remember to clear session if you start building graph from scratch!\n",
"s = reset_tf_session()\n",
"K.clear_session()\n",
"# don't call K.set_learning_phase() !!! (otherwise will enable dropout in train/test simultaneously)"
]
},
@@ -578,7 +567,7 @@
" new_output = keras.layers.GlobalAveragePooling2D()(model.output)\n",
" # add new dense layer for our labels\n",
" new_output = keras.layers.Dense(N_CLASSES, activation='softmax')(new_output)\n",
" model = keras.engine.training.Model(model.inputs, new_output)\n",
" model = keras.Model(model.inputs, new_output)\n",
" return model"
]
},
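`keras.Model` is the public alias that replaces the private `keras.engine.training.Model` path. A sketch of the whole cell's idea, assuming the base network is a pretrained ImageNet backbone such as InceptionV3 (its identity is not visible in this hunk):

```python
# illustrative head replacement on a pretrained backbone (InceptionV3 assumed)
from tensorflow import keras

N_CLASSES = 102  # "102-way classification", per the compile cell below

base = keras.applications.InceptionV3(include_top=False, weights="imagenet")
x = keras.layers.GlobalAveragePooling2D()(base.output)        # collapse spatial dims
out = keras.layers.Dense(N_CLASSES, activation="softmax")(x)  # new class head
model = keras.Model(base.inputs, out)
```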
@@ -647,15 +636,15 @@
"# set all layers trainable by default\n",
"for layer in model.layers:\n",
" layer.trainable = True\n",
" if isinstance(layer, keras.layers.BatchNormalization):\n",
" if type(layer).__name__ == \"BatchNormalization\":\n",
" # we do aggressive exponential smoothing of batch norm\n",
" # parameters to faster adjust to our new dataset\n",
" layer.momentum = 0.9\n",
" \n",
"# fix deep layers (fine-tuning only last 50)\n",
"for layer in model.layers[:-50]:\n",
" # fix all but batch norm layers, because we neeed to update moving averages for a new dataset!\n",
" if not isinstance(layer, keras.layers.BatchNormalization):\n",
" if type(layer).__name__ != \"BatchNormalization\":\n",
" layer.trainable = False"
]
},
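The switch from `isinstance` to a class-name comparison is likely defensive: tf.keras can back this layer with internal V1/V2 `BatchNormalization` classes, so an `isinstance` check against one public class may miss. The same freezing pattern as a small helper (illustrative):

```python
# version-tolerant batch norm detection via the class name
def is_batch_norm(layer):
    return type(layer).__name__ == "BatchNormalization"

for layer in model.layers[:-50]:
    if not is_batch_norm(layer):
        layer.trainable = False  # freeze all but batch norm layers
```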
@@ -675,7 +664,7 @@
"# compile new model\n",
"model.compile(\n",
" loss='categorical_crossentropy', # we train 102-way classification\n",
" optimizer=keras.optimizers.adamax(lr=1e-2), # we can take big lr here because we fixed first layers\n",
" optimizer=keras.optimizers.Adamax(lr=1e-2), # we can take big lr here because we fixed first layers\n",
" metrics=['accuracy'] # report accuracy during training\n",
")"
]
@@ -692,8 +681,8 @@
"\n",
"#### uncomment below to continue training from model checkpoint\n",
"#### fill `last_finished_epoch` with your latest finished epoch\n",
-"# from keras.models import load_model\n",
-"# s = reset_tf_session()\n",
+"# from tensorflow.keras.models import load_model\n",
+"# K.clear_session()\n",
"# last_finished_epoch = 10\n",
"# model = load_model(model_filename.format(last_finished_epoch))"
]
@@ -720,7 +709,7 @@
"source": [
"# fine tune for 2 epochs (full passes through all training data)\n",
"# we make 2*8 epochs, where epoch is 1/8 of our training data to see progress more often\n",
"model.fit_generator(\n",
"model.fit(\n",
" train_generator(tr_files, tr_labels), \n",
" steps_per_epoch=len(tr_files) // BATCH_SIZE // 8,\n",
" epochs=2 * 8,\n",
@@ -744,9 +733,9 @@
"source": [
"## GRADED PART, DO NOT CHANGE!\n",
"# Accuracy on validation set\n",
"test_accuracy = model.evaluate_generator(\n",
"test_accuracy = model.evaluate(\n",
" train_generator(te_files, te_labels), \n",
" len(te_files) // BATCH_SIZE // 2\n",
" steps=len(te_files) // BATCH_SIZE // 2\n",
")[1]\n",
"grader.set_answer(\"wuwwC\", test_accuracy)\n",
"print(test_accuracy)"
@@ -823,4 +812,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
-}
+}
