diff --git a/keras_utils.py b/keras_utils.py
index 6c198e8..04c294b 100644
--- a/keras_utils.py
+++ b/keras_utils.py
@@ -2,10 +2,10 @@
 # -*- coding: utf-8 -*-
 
 from collections import defaultdict
 import numpy as np
-from keras.models import save_model
-import tensorflow as tf
-import keras
-from keras import backend as K
+import tensorflow.compat.v1 as tf
+from tensorflow.compat.v1 import keras
+from tensorflow.compat.v1.keras import backend as K
+from tensorflow.compat.v1.keras.models import save_model
 import tqdm_utils
diff --git a/week2/preprocessed_mnist.py b/week2/preprocessed_mnist.py
index e04ea50..b838084 100644
--- a/week2/preprocessed_mnist.py
+++ b/week2/preprocessed_mnist.py
@@ -1,4 +1,4 @@
-import keras
+from tensorflow import keras
 
 
 def load_dataset(flatten=False):
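The `preprocessed_mnist.py` change is import-only, which works because `tf.keras` mirrors the standalone Keras API, `keras.datasets` included. Below is a minimal sketch of what a loader with this signature typically does under `tf.keras`; the real function body is not shown in this diff, so the normalization and validation split here are assumptions:

```python
# Hypothetical loader in the spirit of load_dataset(); not the repo's actual body.
import numpy as np
from tensorflow import keras

def load_dataset_sketch(flatten=False):
    (X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
    X_train = X_train.astype(np.float32) / 255.0  # scale pixels to [0, 1]
    X_test = X_test.astype(np.float32) / 255.0
    # carve a validation split off the end of the training set (assumed size)
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]
    if flatten:
        # 28x28 images -> flat 784-dim vectors
        X_train, X_val, X_test = (a.reshape(len(a), -1) for a in (X_train, X_val, X_test))
    return X_train, y_train, X_val, y_val, X_test, y_test
```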
"tf.disable_eager_execution()\n", "s = reset_tf_session()\n", "print(\"We're using TF\", tf.__version__)\n", "from matplotlib import animation, rc\n", @@ -53,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -103,38 +81,18 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "plot_gd(x_scale=1.0, lr=0.1, steps=25)" ] }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ] - }, + "execution_count": null, "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# narrow valleys\n", "plot_gd(x_scale=0.5, lr=0.1, steps=25)" @@ -142,19 +100,9 @@ }, { "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ] - }, + "execution_count": null, "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# narrower valleys\n", "plot_gd(x_scale=0.2, lr=0.1, steps=25)" @@ -162,19 +110,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ] - }, + "execution_count": null, "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# bigger learning rate then?\n", "# x is changed faster, but y changes are too big, leads to oscillation\n", @@ -203,4 +141,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} +} \ No newline at end of file diff --git a/week2/v2/intro_to_tensorflow.ipynb b/week2/v2/intro_to_tensorflow.ipynb index 540379f..c2ef7ad 100644 --- a/week2/v2/intro_to_tensorflow.ipynb +++ b/week2/v2/intro_to_tensorflow.ipynb @@ -1,15 +1,5 @@ { "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# set tf 1.x for colab\n", - "%tensorflow_version 1.x" - ] - }, { "cell_type": "markdown", "metadata": { @@ -48,10 +38,11 @@ "metadata": {}, "outputs": [], "source": [ - "import tensorflow as tf\n", + "import tensorflow.compat.v1 as tf\n", "import sys\n", "sys.path.append(\"../..\")\n", "from keras_utils import reset_tf_session\n", + "tf.disable_eager_execution()\n", "s = reset_tf_session()\n", "print(\"We're using TF\", tf.__version__)" ] @@ -581,4 +572,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} +} \ No newline at end of file diff --git a/week2/v2/mnist_with_keras.ipynb b/week2/v2/mnist_with_keras.ipynb index 3262c8a..facb95b 100644 --- a/week2/v2/mnist_with_keras.ipynb +++ b/week2/v2/mnist_with_keras.ipynb @@ -1,15 +1,5 @@ { "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# set tf 1.x for colab\n", - "%tensorflow_version 1.x" - ] - }, { "cell_type": "markdown", "metadata": { @@ -46,13 +36,12 @@ "%matplotlib inline\n", "import tensorflow as tf\n", "print(\"We're using TF\", tf.__version__)\n", - "import keras\n", + "from tensorflow import keras\n", "print(\"We are using Keras\", keras.__version__)\n", "\n", "import sys\n", "sys.path.append(\"../..\")\n", - "import keras_utils\n", - "from keras_utils import reset_tf_session" + "import keras_utils" ] }, { @@ -127,11 +116,11 @@ "outputs": [], "source": [ "# building a model with keras\n", - "from keras.layers import Dense, Activation\n", - "from keras.models import Sequential\n", 
+ "from tensorflow.keras.layers import Dense, Activation\n", + "from tensorflow.keras.models import Sequential\n", "\n", "# we still need to clear a graph though\n", - "s = reset_tf_session()\n", + "K.clear_session()\n", "\n", "model = Sequential() # it is a feed-forward network without loops like in RNN\n", "model.add(Dense(256, input_shape=(784,))) # the first layer must specify the input shape (replacing placeholders)\n", @@ -259,4 +248,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} +} \ No newline at end of file diff --git a/week2/v2/preprocessed_mnist.py b/week2/v2/preprocessed_mnist.py index e04ea50..b838084 100644 --- a/week2/v2/preprocessed_mnist.py +++ b/week2/v2/preprocessed_mnist.py @@ -1,4 +1,4 @@ -import keras +from tensorflow import keras def load_dataset(flatten=False): diff --git a/week2/v2/tensorflow_examples_from_video.ipynb b/week2/v2/tensorflow_examples_from_video.ipynb index a2129d8..00c9098 100644 --- a/week2/v2/tensorflow_examples_from_video.ipynb +++ b/week2/v2/tensorflow_examples_from_video.ipynb @@ -1,15 +1,5 @@ { "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# set tf 1.x for colab\n", - "%tensorflow_version 1.x" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -19,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -28,26 +18,19 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.3.0\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ - "import tensorflow as tf\n", + "import tensorflow.compat.v1 as tf\n", + "tf.disable_eager_execution()\n", "import numpy as np\n", "print(tf.__version__)" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -59,24 +42,16 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(\"matmul:0\", shape=(2, 2), dtype=float32)\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(c)" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -85,21 +60,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[ 2., 2.],\n", - " [ 2., 2.]], dtype=float32)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "s.run(tf.global_variables_initializer())\n", "s.run(c, feed_dict={a: np.ones((2, 2))})" @@ -107,7 +70,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -119,18 +82,22 @@ "metadata": {}, "source": [ "# Video \"Our first model in TensorFlow\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Simple optimization (with simple prints)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -141,7 +108,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ 
diff --git a/week2/v2/intro_to_tensorflow.ipynb b/week2/v2/intro_to_tensorflow.ipynb
index 540379f..c2ef7ad 100644
--- a/week2/v2/intro_to_tensorflow.ipynb
+++ b/week2/v2/intro_to_tensorflow.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {
@@ -48,10 +38,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import tensorflow as tf\n",
+    "import tensorflow.compat.v1 as tf\n",
     "import sys\n",
     "sys.path.append(\"../..\")\n",
     "from keras_utils import reset_tf_session\n",
+    "tf.disable_eager_execution()\n",
     "s = reset_tf_session()\n",
     "print(\"We're using TF\", tf.__version__)"
    ]
@@ -581,4 +572,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
+}
\ No newline at end of file
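Several hunks below swap the course helper `reset_tf_session()` for `K.clear_session()`. The built-in call frees global Keras state so a model can be rebuilt from scratch; unlike the helper it returns nothing, which is why the `s = ...` assignments disappear. A sketch, assuming tf.keras on TF2 (note that the `mnist_with_keras.ipynb` hunks shown next use `K` without showing a `backend as K` import, so it is presumably defined elsewhere in that notebook):

```python
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

K.clear_session()  # drop previously built layers/graph state, reset layer name counters
model = Sequential([Dense(10, input_shape=(784,))])
model.summary()
```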
"cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(?, 1) (?, 1) ()\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "tf.reset_default_graph()\n", "\n", @@ -424,7 +358,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -434,24 +368,11 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.532865\n", - "0.0458802\n", - "0.0410158\n", - "0.040087\n", - "0.0399092\n", - "0.0398751\n" - ] - } - ], + "outputs": [], "source": [ "with tf.Session() as s:\n", " s.run(tf.global_variables_initializer())\n", @@ -464,20 +385,9 @@ }, { "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[ 0.11388827, 0.4882018 , 0.36716884]], dtype=float32)" - ] - }, - "execution_count": 25, + "execution_count": null, "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# found weights\n", "curr_weights.T" @@ -485,20 +395,9 @@ }, { "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[ 0.09498027, 0.48793618, 0.39011257]])" - ] - }, - "execution_count": 26, + "execution_count": null, "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# true weights\n", "w.T" @@ -526,4 +425,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/week3/week3_task1_first_cnn_cifar10_clean.ipynb b/week3/week3_task1_first_cnn_cifar10_clean.ipynb index be94d0d..450e1e6 100644 --- a/week3/week3_task1_first_cnn_cifar10_clean.ipynb +++ b/week3/week3_task1_first_cnn_cifar10_clean.ipynb @@ -1,15 +1,5 @@ { "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# set tf 1.x for colab\n", - "%tensorflow_version 1.x" - ] - }, { "cell_type": "markdown", "metadata": { @@ -90,8 +80,8 @@ "outputs": [], "source": [ "import tensorflow as tf\n", - "import keras\n", - "from keras import backend as K\n", + "from tensorflow import keras\n", + "from tensorflow.keras import backend as K\n", "import numpy as np\n", "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", @@ -149,7 +139,7 @@ }, "outputs": [], "source": [ - "from keras.datasets import cifar10\n", + "from tensorflow.keras.datasets import cifar10\n", "(x_train, y_train), (x_test, y_test) = cifar10.load_data()" ] }, @@ -265,9 +255,8 @@ "outputs": [], "source": [ "# import necessary building blocks\n", - "from keras.models import Sequential\n", - "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout\n", - "from keras.layers.advanced_activations import LeakyReLU" + "from tensorflow.keras.models import Sequential\n", + "from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Softmax, LeakyReLU, Dropout" ] }, { @@ -275,18 +264,18 @@ "metadata": {}, "source": [ "Convolutional networks are built from several types of layers:\n", - "- [Conv2D](https://keras.io/layers/convolutional/#conv2d) - performs convolution:\n", + "- [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) - performs convolution:\n", " - **filters**: number of output channels; \n", " - **kernel_size**: an integer or 
diff --git a/week2/v2/tensorflow_examples_from_video.ipynb b/week2/v2/tensorflow_examples_from_video.ipynb
index a2129d8..00c9098 100644
--- a/week2/v2/tensorflow_examples_from_video.ipynb
+++ b/week2/v2/tensorflow_examples_from_video.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -19,7 +9,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -28,26 +18,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "1.3.0\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
-    "import tensorflow as tf\n",
+    "import tensorflow.compat.v1 as tf\n",
+    "tf.disable_eager_execution()\n",
     "import numpy as np\n",
     "print(tf.__version__)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -59,24 +42,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tensor(\"matmul:0\", shape=(2, 2), dtype=float32)\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "print(c)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -85,21 +60,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[ 2., 2.],\n",
-       " [ 2., 2.]], dtype=float32)"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "s.run(tf.global_variables_initializer())\n",
     "s.run(c, feed_dict={a: np.ones((2, 2))})"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -119,18 +82,22 @@
    "metadata": {},
    "source": [
     "# Video \"Our first model in TensorFlow\""
-   ]
+   ],
+   "execution_count": null,
+   "outputs": []
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "### Simple optimization (with simple prints)"
-   ]
+   ],
+   "execution_count": null,
+   "outputs": []
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -141,7 +108,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -151,46 +118,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "[]"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "tf.trainable_variables()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "-0.865988 1.17177\n",
-      "-0.69279 0.749935\n",
-      "-0.554232 0.479959\n",
-      "-0.443386 0.307174\n",
-      "-0.354709 0.196591\n",
-      "-0.283767 0.125818\n",
-      "-0.227014 0.0805237\n",
-      "-0.181611 0.0515352\n",
-      "-0.145289 0.0329825\n",
-      "-0.116231 0.0211088\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "with tf.Session() as s: # in this way session will be closed automatically\n",
     "    s.run(tf.global_variables_initializer())\n",
@@ -204,11 +143,13 @@
    "metadata": {},
    "source": [
     "### Simple optimization (with tf.Print)"
-   ]
+   ],
+   "execution_count": null,
+   "outputs": []
  },
  {
   "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -220,7 +161,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -230,7 +171,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -242,7 +183,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -264,11 +205,13 @@
   "metadata": {},
   "source": [
    "### Simple optimization (with TensorBoard logging)"
-  ]
+  ],
+  "execution_count": null,
+  "outputs": []
  },
  {
   "cell_type": "code",
-   "execution_count": 16,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -279,7 +222,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -289,7 +232,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -300,7 +243,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 19,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -318,7 +261,9 @@
   "metadata": {},
   "source": [
    "Run `tensorboard --logdir=./logs` in bash"
-  ]
+  ],
+  "execution_count": null,
+  "outputs": []
  },
  {
   "cell_type": "markdown",
@@ -326,14 +271,18 @@
   "source": [
    "This is what you can see in your browser **(not available in Coursera Notebooks)**\n",
    ""
-  ]
+  ],
+  "execution_count": null,
+  "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**If you're running on Google Colab you can still run TensorBoard!**"
-  ]
+  ],
+  "execution_count": null,
+  "outputs": []
  },
  {
   "cell_type": "code",
@@ -355,7 +304,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -367,22 +316,15 @@
   "metadata": {},
   "source": [
    "### Training a linear model"
-  ]
+  ],
+  "execution_count": null,
+  "outputs": []
  },
  {
   "cell_type": "code",
-   "execution_count": 21,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "(1000, 3) (1000, 1)\n",
-      "[[ 0.09498027 0.48793618 0.39011257]]\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
   "source": [
    "# generate model data\n",
    "N = 1000\n",
@@ -397,17 +339,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "(?, 1) (?, 1) ()\n"
-     ]
-    }
-   ],
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
   "source": [
    "tf.reset_default_graph()\n",
    "\n",
@@ -424,7 +358,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 23,
+   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -434,24 +368,11 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "0.532865\n",
-      "0.0458802\n",
-      "0.0410158\n",
-      "0.040087\n",
-      "0.0399092\n",
-      "0.0398751\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "with tf.Session() as s:\n",
    "    s.run(tf.global_variables_initializer())\n",
@@ -464,20 +385,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 25,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[ 0.11388827, 0.4882018 , 0.36716884]], dtype=float32)"
-      ]
-     },
-     "execution_count": 25,
+   "execution_count": null,
    "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
   "source": [
    "# found weights\n",
    "curr_weights.T"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 26,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[ 0.09498027, 0.48793618, 0.39011257]])"
-      ]
-     },
-     "execution_count": 26,
+   "execution_count": null,
    "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
   "source": [
    "# true weights\n",
    "w.T"
   ]
@@ -526,4 +425,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
-}
+}
\ No newline at end of file
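The "training a linear model" cells above keep working under `compat.v1` with nothing beyond the import swap and the eager switch-off. Condensed into one runnable sketch (shapes follow the notebook: N=1000, 3 features; variable names are mine):

```python
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.reset_default_graph()

N, D = 1000, 3
x = np.random.random((N, D)).astype(np.float32)
w_true = np.random.random((D, 1)).astype(np.float32)
y = x @ w_true + 0.2 * np.random.randn(N, 1).astype(np.float32)

features = tf.placeholder(tf.float32, (None, D))
target = tf.placeholder(tf.float32, (None, 1))
weights = tf.get_variable("weights", (D, 1), dtype=tf.float32)
loss = tf.reduce_mean((target - features @ weights) ** 2)  # mean squared error
step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as s:
    s.run(tf.global_variables_initializer())
    for _ in range(300):
        _, curr_loss = s.run([step, loss], {features: x, target: y})
    print(curr_loss, s.run(weights).T)  # fitted weights approach w_true.T
```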
diff --git a/week3/week3_task1_first_cnn_cifar10_clean.ipynb b/week3/week3_task1_first_cnn_cifar10_clean.ipynb
index be94d0d..450e1e6 100644
--- a/week3/week3_task1_first_cnn_cifar10_clean.ipynb
+++ b/week3/week3_task1_first_cnn_cifar10_clean.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {
@@ -90,8 +80,8 @@
    "outputs": [],
    "source": [
     "import tensorflow as tf\n",
-    "import keras\n",
-    "from keras import backend as K\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import backend as K\n",
     "import numpy as np\n",
     "%matplotlib inline\n",
     "import matplotlib.pyplot as plt\n",
@@ -149,7 +139,7 @@
    },
    "outputs": [],
    "source": [
-    "from keras.datasets import cifar10\n",
+    "from tensorflow.keras.datasets import cifar10\n",
     "(x_train, y_train), (x_test, y_test) = cifar10.load_data()"
    ]
   },
@@ -265,9 +255,8 @@
    "outputs": [],
    "source": [
     "# import necessary building blocks\n",
-    "from keras.models import Sequential\n",
-    "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout\n",
-    "from keras.layers.advanced_activations import LeakyReLU"
+    "from tensorflow.keras.models import Sequential\n",
+    "from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Softmax, LeakyReLU, Dropout"
    ]
   },
   {
@@ -275,18 +264,18 @@
    "metadata": {},
    "source": [
     "Convolutional networks are built from several types of layers:\n",
-    "- [Conv2D](https://keras.io/layers/convolutional/#conv2d) - performs convolution:\n",
+    "- [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) - performs convolution:\n",
     "    - **filters**: number of output channels; \n",
     "    - **kernel_size**: an integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window;\n",
     "    - **padding**: padding=\"same\" adds zero padding to the input, so that the output has the same width and height, padding='valid' performs convolution only in locations where kernel and the input fully overlap;\n",
     "    - **activation**: \"relu\", \"tanh\", etc.\n",
     "    - **input_shape**: shape of input.\n",
-    "- [MaxPooling2D](https://keras.io/layers/pooling/#maxpooling2d) - performs 2D max pooling.\n",
-    "- [Flatten](https://keras.io/layers/core/#flatten) - flattens the input, does not affect the batch size.\n",
-    "- [Dense](https://keras.io/layers/core/#dense) - fully-connected layer.\n",
-    "- [Activation](https://keras.io/layers/core/#activation) - applies an activation function.\n",
-    "- [LeakyReLU](https://keras.io/layers/advanced-activations/#leakyrelu) - applies leaky relu activation.\n",
-    "- [Dropout](https://keras.io/layers/core/#dropout) - applies dropout."
+    "- [MaxPool2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D) - performs 2D max pooling.\n",
+    "- [Flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) - flattens the input, does not affect the batch size.\n",
+    "- [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) - fully-connected layer.\n",
+    "- [Softmax](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Softmax) - applies softmax activation function.\n",
+    "- [LeakyReLU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LeakyReLU) - applies leaky relu activation.\n",
+    "- [Dropout](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout) - applies dropout."
    ]
   },
   {
@@ -309,7 +298,7 @@
     "... # here comes a bunch of convolutional, pooling and dropout layers\n",
     "\n",
     "model.add(Dense(NUM_CLASSES)) # the last layer with neuron for each class\n",
-    "model.add(Activation(\"softmax\")) # output probabilities\n",
+    "model.add(Softmax()) # output probabilities\n",
     "```\n",
     "\n",
     "Stack __4__ convolutional layers with kernel size __(3, 3)__ with growing number of filters __(16, 32, 32, 64)__, use \"same\" padding.\n",
@@ -361,7 +350,7 @@
    "outputs": [],
    "source": [
     "# describe model\n",
-    "s = reset_tf_session() # clear default graph\n",
+    "K.clear_session() # clear default graph\n",
     "model = make_model()\n",
     "model.summary()"
    ]
@@ -420,14 +409,14 @@
     "BATCH_SIZE = 32\n",
     "EPOCHS = 10\n",
     "\n",
-    "s = reset_tf_session() # clear default graph\n",
+    "K.clear_session() # clear default graph\n",
     "# don't call K.set_learning_phase() !!! (otherwise will enable dropout in train/test simultaneously)\n",
     "model = make_model() # define our model\n",
     "\n",
     "# prepare model for fitting (loss, optimizer, etc)\n",
     "model.compile(\n",
     "    loss='categorical_crossentropy', # we train 10-way classification\n",
-    "    optimizer=keras.optimizers.adamax(lr=INIT_LR), # for SGD\n",
+    "    optimizer=keras.optimizers.Adamax(lr=INIT_LR), # for SGD\n",
     "    metrics=['accuracy'] # report accuracy during training\n",
     ")\n",
@@ -460,8 +449,8 @@
     "\n",
     "#### uncomment below to continue training from model checkpoint\n",
     "#### fill `last_finished_epoch` with your latest finished epoch\n",
-    "# from keras.models import load_model\n",
-    "# s = reset_tf_session()\n",
+    "# from tensorflow.keras.models import load_model\n",
+    "# K.clear_session()\n",
     "# last_finished_epoch = 7\n",
     "# model = load_model(model_filename.format(last_finished_epoch))"
    ]
@@ -653,6 +642,7 @@
    "outputs": [],
    "source": [
     "s = reset_tf_session() # clear default graph\n",
+    "tf.compat.v1.disable_eager_execution()\n",
     "K.set_learning_phase(0) # disable dropout\n",
     "model = make_model()\n",
     "model.load_weights(\"weights.h5\") # that were saved after model.fit"
    ]
@@ -700,10 +690,10 @@
     "\n",
     "    # this is the placeholder for the input image\n",
     "    input_img = model.input\n",
-    "    img_width, img_height = input_img.shape.as_list()[1:3]\n",
+    "    img_width, img_height = input_img.shape[1:3]\n",
     "    \n",
     "    # find the layer output by name\n",
-    "    layer_output = list(filter(lambda x: x.name == layer_name, model.layers))[0].output\n",
+    "    layer_output = model.get_layer(name=layer_name).output\n",
     "\n",
     "    # we build a loss function that maximizes the activation\n",
     "    # of the filter_index filter of the layer considered\n",
@@ -755,7 +745,7 @@
     "    cols = 8\n",
     "    rows = 2\n",
     "    filter_index = 0\n",
-    "    max_filter_index = list(filter(lambda x: x.name == layer_name, model.layers))[0].output.shape.as_list()[-1] - 1\n",
+    "    max_filter_index = model.get_layer(name=layer_name).output.shape[-1] - 1\n",
     "    fig = plt.figure(figsize=(2 * cols - 1, 3 * rows - 1))\n",
     "    for i in range(cols):\n",
     "        for j in range(rows):\n",
@@ -789,7 +779,7 @@
     "conv_activation_layers = []\n",
     "for layer in model.layers:\n",
     "    if isinstance(layer, LeakyReLU):\n",
-    "        prev_layer = layer.inbound_nodes[0].inbound_layers[0]\n",
+    "        prev_layer = layer.inbound_nodes[0].inbound_layers\n",
     "        if isinstance(prev_layer, Conv2D):\n",
     "            conv_activation_layers.append(layer)\n",
     "\n",
@@ -914,4 +904,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
-}
+}
\ No newline at end of file
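`Model.get_layer` is the public replacement for the `filter(lambda ...)` layer lookups the hunks above rewrite, and TF2 tensor shapes index directly without `.as_list()`. A throwaway check:

```python
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Input

inputs = Input((32, 32, 3))
outputs = Conv2D(16, (3, 3), padding='same', name='conv_1')(inputs)
model = keras.Model(inputs, outputs)

layer_output = model.get_layer(name='conv_1').output
print(layer_output.shape[-1])  # 16 -- the filter count, no .as_list() needed
```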
diff --git a/week3/week3_task2_fine_tuning_clean.ipynb b/week3/week3_task2_fine_tuning_clean.ipynb
index fef9275..c6518ea 100644
--- a/week3/week3_task2_fine_tuning_clean.ipynb
+++ b/week3/week3_task2_fine_tuning_clean.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -82,8 +72,8 @@
    "outputs": [],
    "source": [
     "import tensorflow as tf\n",
-    "import keras\n",
-    "from keras import backend as K\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import backend as K\n",
     "import numpy as np\n",
     "%matplotlib inline\n",
     "import matplotlib.pyplot as plt\n",
@@ -94,8 +84,7 @@
     "import scipy.io\n",
     "import os\n",
     "import tarfile\n",
-    "import keras_utils\n",
-    "from keras_utils import reset_tf_session "
+    "import keras_utils"
    ]
   },
   {
@@ -494,7 +483,7 @@
     "        # stack images into 4D tensor [batch_size, img_size, img_size, 3]\n",
     "        batch_imgs = np.stack(batch_imgs, axis=0)\n",
     "        # convert targets into 2D tensor [batch_size, num_classes]\n",
-    "        batch_targets = keras.utils.np_utils.to_categorical(batch_targets, N_CLASSES)\n",
+    "        batch_targets = keras.utils.to_categorical(batch_targets, N_CLASSES)\n",
     "        yield batch_imgs, batch_targets"
    ]
   },
@@ -553,7 +542,7 @@
    "outputs": [],
    "source": [
     "# remember to clear session if you start building graph from scratch!\n",
-    "s = reset_tf_session()\n",
+    "K.clear_session()\n",
     "# don't call K.set_learning_phase() !!! (otherwise will enable dropout in train/test simultaneously)"
    ]
   },
@@ -578,7 +567,7 @@
     "    new_output = keras.layers.GlobalAveragePooling2D()(model.output)\n",
     "    # add new dense layer for our labels\n",
     "    new_output = keras.layers.Dense(N_CLASSES, activation='softmax')(new_output)\n",
-    "    model = keras.engine.training.Model(model.inputs, new_output)\n",
+    "    model = keras.Model(model.inputs, new_output)\n",
     "    return model"
    ]
   },
@@ -647,7 +636,7 @@
     "# set all layers trainable by default\n",
     "for layer in model.layers:\n",
     "    layer.trainable = True\n",
-    "    if isinstance(layer, keras.layers.BatchNormalization):\n",
+    "    if type(layer).__name__ == \"BatchNormalization\":\n",
     "        # we do aggressive exponential smoothing of batch norm\n",
     "        # parameters to faster adjust to our new dataset\n",
     "        layer.momentum = 0.9\n",
@@ -655,7 +644,7 @@
     "# fix deep layers (fine-tuning only last 50)\n",
     "for layer in model.layers[:-50]:\n",
     "    # fix all but batch norm layers, because we neeed to update moving averages for a new dataset!\n",
-    "    if not isinstance(layer, keras.layers.BatchNormalization):\n",
+    "    if type(layer).__name__ != \"BatchNormalization\":\n",
     "        layer.trainable = False"
    ]
   },
@@ -675,7 +664,7 @@
     "# compile new model\n",
     "model.compile(\n",
     "    loss='categorical_crossentropy', # we train 102-way classification\n",
-    "    optimizer=keras.optimizers.adamax(lr=1e-2), # we can take big lr here because we fixed first layers\n",
+    "    optimizer=keras.optimizers.Adamax(lr=1e-2), # we can take big lr here because we fixed first layers\n",
     "    metrics=['accuracy'] # report accuracy during training\n",
     ")"
    ]
@@ -692,8 +681,8 @@
     "\n",
     "#### uncomment below to continue training from model checkpoint\n",
     "#### fill `last_finished_epoch` with your latest finished epoch\n",
-    "# from keras.models import load_model\n",
-    "# s = reset_tf_session()\n",
+    "# from tensorflow.keras.models import load_model\n",
+    "# K.clear_session()\n",
     "# last_finished_epoch = 10\n",
     "# model = load_model(model_filename.format(last_finished_epoch))"
    ]
@@ -720,7 +709,7 @@
    "source": [
     "# fine tune for 2 epochs (full passes through all training data)\n",
     "# we make 2*8 epochs, where epoch is 1/8 of our training data to see progress more often\n",
-    "model.fit_generator(\n",
+    "model.fit(\n",
     "    train_generator(tr_files, tr_labels), \n",
     "    steps_per_epoch=len(tr_files) // BATCH_SIZE // 8,\n",
     "    epochs=2 * 8,\n",
@@ -744,9 +733,9 @@
    "source": [
     "## GRADED PART, DO NOT CHANGE!\n",
     "# Accuracy on validation set\n",
-    "test_accuracy = model.evaluate_generator(\n",
+    "test_accuracy = model.evaluate(\n",
     "    train_generator(te_files, te_labels), \n",
-    "    len(te_files) // BATCH_SIZE // 2\n",
+    "    steps=len(te_files) // BATCH_SIZE // 2\n",
     ")[1]\n",
     "grader.set_answer(\"wuwwC\", test_accuracy)\n",
     "print(test_accuracy)"
    ]
@@ -823,4 +812,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
-}
+}
\ No newline at end of file
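`fit_generator` and `evaluate_generator` became thin deprecated aliases in TF2: `Model.fit` and `Model.evaluate` consume Python generators directly. One pitfall the hunk above handles: in `evaluate`, `steps` must be passed by keyword, since the second positional slot is `y`. A minimal sketch with a hypothetical generator:

```python
import numpy as np
from tensorflow import keras

def batch_gen(batch_size=32):
    while True:  # Keras expects the generator to loop forever
        x = np.random.random((batch_size, 8)).astype(np.float32)
        y = (x.sum(axis=1, keepdims=True) > 4).astype(np.float32)
        yield x, y

model = keras.Sequential([keras.layers.Dense(1, activation='sigmoid', input_shape=(8,))])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(batch_gen(), steps_per_epoch=10, epochs=2)
loss, acc = model.evaluate(batch_gen(), steps=5)  # steps by keyword, not positional
```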
index bba5078..7fba86d 100644
--- a/week4/Adversarial-task.ipynb
+++ b/week4/Adversarial-task.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -156,13 +146,14 @@
     }
    ],
    "source": [
-    "import tensorflow as tf\n",
+    "import tensorflow.compat.v1 as tf\n",
     "from keras_utils import reset_tf_session\n",
     "s = reset_tf_session()\n",
+    "tf.disable_eager_execution()\n",
     "\n",
-    "import keras\n",
-    "from keras.models import Sequential\n",
-    "from keras import layers as L"
+    "from tensorflow import keras\n",
+    "from tensorflow.keras.models import Sequential\n",
+    "from tensorflow.keras import layers as L"
    ]
   },
   {
@@ -181,12 +172,12 @@
     "generator.add(L.Dense(10*8*8, activation='elu'))\n",
     "\n",
     "generator.add(L.Reshape((8,8,10)))\n",
-    "generator.add(L.Deconv2D(64,kernel_size=(5,5),activation='elu'))\n",
-    "generator.add(L.Deconv2D(64,kernel_size=(5,5),activation='elu'))\n",
+    "generator.add(L.Conv2DTranspose(64,kernel_size=(5,5),activation='elu'))\n",
+    "generator.add(L.Conv2DTranspose(64,kernel_size=(5,5),activation='elu'))\n",
     "generator.add(L.UpSampling2D(size=(2,2)))\n",
-    "generator.add(L.Deconv2D(32,kernel_size=3,activation='elu'))\n",
-    "generator.add(L.Deconv2D(32,kernel_size=3,activation='elu'))\n",
-    "generator.add(L.Deconv2D(32,kernel_size=3,activation='elu'))\n",
+    "generator.add(L.Conv2DTranspose(32,kernel_size=3,activation='elu'))\n",
+    "generator.add(L.Conv2DTranspose(32,kernel_size=3,activation='elu'))\n",
+    "generator.add(L.Conv2DTranspose(32,kernel_size=3,activation='elu'))\n",
     "\n",
     "generator.add(L.Conv2D(3,kernel_size=3,activation=None))\n"
    ]
@@ -501,4 +492,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
+}
\ No newline at end of file
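`L.Deconv2D` was a standalone-Keras alias that tf.keras dropped; the layer itself survives as `Conv2DTranspose` with identical arguments. Shape check for one of the generator's upsampling steps:

```python
import numpy as np
from tensorflow.keras import layers as L

deconv = L.Conv2DTranspose(64, kernel_size=(5, 5), activation='elu')
out = deconv(np.zeros((1, 8, 8, 10), dtype=np.float32))
print(out.shape)  # (1, 12, 12, 64): 'valid' padding grows H and W by kernel_size - 1
```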
1), \n", + " strides=[1, 2, 2, 1], \n", + " padding='SAME')\n", "\n", - " result = s.run(conv)\n", " print(\"input:\")\n", " print(x[0, :, :, 0])\n", " print(\"filter:\")\n", " print(f[:, :, 0, 0])\n", " print(\"output:\")\n", " print(result[0, :, :, 0])\n", - " s.close()\n", " \n", "test_conv2d_transpose(img_size=2, filter_size=2)\n", "test_conv2d_transpose(img_size=2, filter_size=3)\n", @@ -467,8 +453,7 @@ "source": [ "# Check autoencoder shapes along different code_sizes\n", "get_dim = lambda layer: np.prod(layer.output_shape[1:])\n", - "for code_size in [1,8,32,128,512]:\n", - " s = reset_tf_session()\n", + "for code_size in [1,8,32,128,512]: \n", " encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=code_size)\n", " print(\"Testing code size %i\" % code_size)\n", " assert encoder.output_shape[1:]==(code_size,),\"encoder must output a code of required size\"\n", @@ -479,8 +464,7 @@ " for layer in encoder.layers + decoder.layers:\n", " assert get_dim(layer) >= code_size, \"Encoder layer %s is smaller than bottleneck (%i units)\"%(layer.name,get_dim(layer))\n", "\n", - "print(\"All tests passed!\")\n", - "s = reset_tf_session()" + "print(\"All tests passed!\")" ] }, { @@ -498,7 +482,7 @@ "source": [ "# Look at encoder and decoder shapes.\n", "# Total number of trainable parameters of encoder and decoder should be close.\n", - "s = reset_tf_session()\n", + "K.clear_session()\n", "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)\n", "encoder.summary()\n", "decoder.summary()" @@ -522,10 +506,6 @@ }, "outputs": [], "source": [ - "s = reset_tf_session()\n", - "\n", - "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)\n", - "\n", "inp = L.Input(IMG_SHAPE)\n", "code = encoder(inp)\n", "reconstruction = decoder(code)\n", @@ -551,7 +531,7 @@ "\n", "#### uncomment below to continue training from model checkpoint\n", "#### fill `last_finished_epoch` with your latest finished epoch\n", - "# from keras.models import load_model\n", + "# from tensorflow.keras.models import load_model\n", "# s = reset_tf_session()\n", "# last_finished_epoch = 4\n", "# autoencoder = load_model(model_filename.format(last_finished_epoch))\n", @@ -630,7 +610,7 @@ "outputs": [], "source": [ "# restore trained weights\n", - "s = reset_tf_session()\n", + "K.clear_session()\n", "\n", "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)\n", "encoder.load_weights(\"encoder.h5\")\n", @@ -772,7 +752,7 @@ }, "outputs": [], "source": [ - "s = reset_tf_session()\n", + "K.clear_session()\n", "\n", "# we use bigger code size here for better quality\n", "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=512)\n", @@ -846,7 +826,7 @@ "outputs": [], "source": [ "# restore trained encoder weights\n", - "s = reset_tf_session()\n", + "K.clear_session()\n", "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)\n", "encoder.load_weights(\"encoder.h5\")" ] @@ -880,7 +860,7 @@ }, "outputs": [], "source": [ - "from sklearn.neighbors.unsupervised import NearestNeighbors\n", + "from sklearn.neighbors import NearestNeighbors\n", "nei_clf = NearestNeighbors(metric=\"euclidean\")\n", "nei_clf.fit(codes)" ] @@ -1020,7 +1000,7 @@ "outputs": [], "source": [ "# restore trained encoder weights\n", - "s = reset_tf_session()\n", + "K.clear_session()\n", "encoder, decoder = build_deep_autoencoder(IMG_SHAPE, code_size=32)\n", "encoder.load_weights(\"encoder.h5\")\n", "decoder.load_weights(\"decoder.h5\")" @@ -1115,4 +1095,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No 
\ No newline at end of file
diff --git a/week5/RNN-task.ipynb b/week5/RNN-task.ipynb
index 12283c2..911fee6 100644
--- a/week5/RNN-task.ipynb
+++ b/week5/RNN-task.ipynb
@@ -1,15 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set tf 1.x for colab\n",
-    "%tensorflow_version 1.x"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -34,7 +24,7 @@
    },
    "outputs": [],
    "source": [
-    "import tensorflow as tf\n",
+    "import tensorflow.compat.v1 as tf\n",
     "print(tf.__version__)\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
@@ -236,7 +226,8 @@
    "outputs": [],
    "source": [
     "# remember to reset your session if you change your graph!\n",
-    "s = keras_utils.reset_tf_session()"
+    "s = keras_utils.reset_tf_session()\n",
+    "tf.disable_eager_execution()"
    ]
   },
   {
@@ -250,8 +241,8 @@
    },
    "outputs": [],
    "source": [
-    "import keras\n",
-    "from keras.layers import concatenate, Dense, Embedding\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras.layers import Dense, Embedding\n",
     "\n",
     "rnn_num_units = 64 # size of hidden state\n",
     "embedding_size = 16 # for characters\n",
@@ -662,7 +653,7 @@
     "    \n",
     "cell = CustomRNN(rnn_num_units)\n",
     "\n",
-    "input_sequence = tf.placeholder(tf.int32, (None, None))\n",
+    "input_sequence = tf.placeholder(tf.float32, (None, None))\n",
     "    \n",
     "predicted_probas, last_state = tf.nn.dynamic_rnn(cell, input_sequence[:, :, None], dtype=tf.float32)\n",
     "\n",
@@ -690,7 +681,7 @@
    },
    "outputs": [],
    "source": [
-    "for obj in dir(tf.nn.rnn_cell) + dir(tf.contrib.rnn):\n",
+    "for obj in dir(tf.nn.rnn_cell):\n",
     "    if obj.endswith('Cell'):\n",
     "        print(obj, end=\"\\t\")"
    ]
@@ -743,4 +734,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
+}
\ No newline at end of file
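`tf.contrib` no longer exists in TF2, which is why the cell inventory above scans only `tf.nn.rnn_cell`; under `compat.v1` that namespace still carries the classic cells. Quick check:

```python
import tensorflow.compat.v1 as tf

for obj in dir(tf.nn.rnn_cell):
    if obj.endswith('Cell'):
        print(obj, end='\t')  # BasicLSTMCell, BasicRNNCell, GRUCell, LSTMCell, ...
```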