From 586307d214ed93f190a4c14400df4cddf12928ba Mon Sep 17 00:00:00 2001 From: leoarc Date: Thu, 23 Jul 2020 18:00:54 +0530 Subject: [PATCH] added executed file --- Segmentation Sample Models/YNet/YNet.ipynb | 975 ++++++++++++--------- 1 file changed, 541 insertions(+), 434 deletions(-) diff --git a/Segmentation Sample Models/YNet/YNet.ipynb b/Segmentation Sample Models/YNet/YNet.ipynb index 446383d..0106351 100644 --- a/Segmentation Sample Models/YNet/YNet.ipynb +++ b/Segmentation Sample Models/YNet/YNet.ipynb @@ -1,441 +1,548 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "YNet.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - } + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nsRYBZOk97Ie" + }, + "outputs": [], + "source": [ + "class PatchExtractor:\n", + " def __init__(self, img, patch_size, stride):\n", + " '''\n", + " :param img: :py:class:`~PIL.Image.Image`\n", + " :param patch_size: integer, size of the patch\n", + " :param stride: integer, size of the stride\n", + " '''\n", + " self.img = img\n", + " self.size = patch_size\n", + " self.stride = stride\n", + "\n", + " def extract_patches(self):\n", + " \"\"\"\n", + " extracts all patches from an image\n", + " :returns: A list of :py:class:`~PIL.Image.Image` objects.\n", + " \"\"\"\n", + " wp, hp = self.shape()\n", + " return [self.extract_patch((w, h)) for h in range(hp) for w in range(wp)]\n", + "\n", + " def extract_patch(self, patch):\n", + " \"\"\"\n", + " extracts a patch from an input image\n", + " :param patch: a tuple\n", + " :rtype: :py:class:`~PIL.Image.Image`\n", + " :returns: An :py:class:`~PIL.Image.Image` object.\n", + " \"\"\"\n", + " return self.img.crop((\n", + " patch[0] * self.stride, # left\n", + " patch[1] * self.stride, # up\n", + " patch[0] * self.stride + self.size, # right\n", + " patch[1] * self.stride + self.size # down\n", + " ))\n", + "\n", + " def shape(self):\n", + " wp = int((self.img.width - self.size) / self.stride + 1)\n", + " hp = int((self.img.height - self.size) / self.stride + 1)\n", + " return wp, hp\n" + ] }, - "cells": [ - { - "cell_type": "code", - "metadata": { - "id": "nsRYBZOk97Ie", - "colab_type": "code", - "colab": {} - }, - "source": [ - "class PatchExtractor:\n", - " def __init__(self, img, patch_size, stride):\n", - " '''\n", - " :param img: :py:class:`~PIL.Image.Image`\n", - " :param patch_size: integer, size of the patch\n", - " :param stride: integer, size of the stride\n", - " '''\n", - " self.img = img\n", - " self.size = patch_size\n", - " self.stride = stride\n", - "\n", - " def extract_patches(self):\n", - " \"\"\"\n", - " extracts all patches from an image\n", - " :returns: A list of :py:class:`~PIL.Image.Image` objects.\n", - " \"\"\"\n", - " wp, hp = self.shape()\n", - " return [self.extract_patch((w, h)) for h in range(hp) for w in range(wp)]\n", - "\n", - " def extract_patch(self, patch):\n", - " \"\"\"\n", - " extracts a patch from an input image\n", - " :param patch: a tuple\n", - " :rtype: :py:class:`~PIL.Image.Image`\n", - " :returns: An :py:class:`~PIL.Image.Image` object.\n", - " \"\"\"\n", - " return self.img.crop((\n", - " patch[0] * self.stride, # left\n", - " patch[1] * self.stride, # up\n", - " patch[0] * self.stride + self.size, # right\n", - " patch[1] * self.stride + self.size # down\n", - " ))\n", - "\n", - " def shape(self):\n", - " wp = int((self.img.width - 
self.size) / self.stride + 1)\n", - " hp = int((self.img.height - self.size) / self.stride + 1)\n", - " return wp, hp\n" - ], - "execution_count": 6, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "b9jdznlx_6Pd", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import glob\n", - "from PIL import Image\n", - "\n", - "PATCH_SIZE = 256\n", - "\n", - "# This is the folder you'll find after extracting the dataset.\n", - "train_folder = './data/images'\n", - "labels = glob.glob(train_folder + '/*.tif')\n", - "labels.sort()" - ], - "execution_count": 7, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "GbQmmfIv_89L", - "colab_type": "code", - "colab": {} - }, - "source": [ - "for item in labels:\n", - " try:\n", - " with Image.open(item) as img:\n", - " extractor = PatchExtractor(img=img, patch_size=PATCH_SIZE, stride=256)\n", - " patches = extractor.extract_patches()\n", - " count = 0\n", - " for p in patches:\n", - " count += 1\n", - "# print('./data/images/patches_i/' + str(count) + '_' + item.split('/')[-1][:-3] + 'png')\n", - " p.save('./data/images/patches_i/' + str(count) + '_' + item.split('/')[-1][:-3] + 'png')\n", - " except Exception as error:\n", - " print('error with', item, error)" - ], - "execution_count": 5, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "tJOZuT_1__X7", - "colab_type": "code", - "colab": {} - }, - "source": [ - "labels = glob.glob('./data/br_masks/*.png')\n", - "labels.sort()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "dS3mNinvABQC", - "colab_type": "code", - "colab": {} - }, - "source": [ - "for item in labels:\n", - " try:\n", - " with Image.open(item) as img:\n", - " extractor = PatchExtractor(img=img, patch_size=PATCH_SIZE, stride=256)\n", - " patches = extractor.extract_patches()\n", - " count = 0\n", - " for p in patches:\n", - " count += 1\n", - " # print('./train/' + value + '/' + str(count) + '_' + key.split('/')[-1])\n", - " p.save('./data/br_masks/patches_m/' + str(count) + '_' + item.split('/')[-1])\n", - " except Exception as error:\n", - " print('error with', item, error)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "eSzM4cIhADqQ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "images = [item for item in glob.glob('data/images/patches_i/*')]\n", - "masks = [item for item in glob.glob('data/br_masks/patches_m/*')]\n", - "images.sort()\n", - "masks.sort()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "3_3IMuP7_Df7", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import sys\n", - "import os\n", - "import numpy as np\n", - "from keras.utils import Progbar\n", - "from skimage.io import imread, imshow, imread_collection, concatenate_images\n", - "from skimage.transform import resize\n", - "from skimage.morphology import label\n", - "from itertools import chain\n", - "from skimage.io import imread, imshow, imread_collection, concatenate_images\n", - "from skimage.transform import resize\n", - "from skimage.morphology import label\n", - "from tensorflow.keras.models import Model, load_model\n", - "from tensorflow.keras.layers import Input\n", - "# Function read train images and mask return as nump array\n", - "def read_train_data(IMG_WIDTH=256,IMG_HEIGHT=256,IMG_CHANNELS=3):\n", - " X_train = np.zeros((len(images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\n", 
- " Y_train = np.zeros((len(masks), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n", - " print('Getting and resizing train images and masks ... ')\n", - " sys.stdout.flush()\n", - " if os.path.isfile(\"train_img.npy\") and os.path.isfile(\"train_mask.npy\"):\n", - " print(\"Train file loaded from memory\")\n", - " X_train = np.load(\"train_img.npy\")\n", - " Y_train = np.load(\"train_mask.npy\")\n", - " return X_train,Y_train\n", - " a = Progbar(len(images))\n", - " for n, p in enumerate(zip(images, masks)):\n", - " path = p[0]\n", - " img = imread(path)[:,:,:IMG_CHANNELS]\n", - " img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n", - " X_train[n] = img\n", - " mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n", - " mask_ = imread(p[1])\n", - " mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', \n", - " preserve_range=True), axis=-1)\n", - " mask = np.maximum(mask, mask_)\n", - " Y_train[n] = mask\n", - " a.update(n)\n", - " np.save(\"train_img\",X_train)\n", - " np.save(\"train_mask\",Y_train)\n", - " return X_train,Y_train" - ], - "execution_count": 3, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "w2R9ukO6_G9W", - "colab_type": "code", - "colab": {} - }, - "source": [ - "train_img,train_mask = read_train_data()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "dIju4iZk_Re0", - "colab_type": "code", - "colab": {} - }, - "source": [ - "\n", - "\n" - ], - "execution_count": 2, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "LbtzgcSf-GwT", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import os,time,cv2\n", - "import tensorflow as tf\n", - "import numpy as np\n", - "import tf_slim as slim" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "KHEa_fzD-MGG", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):\n", - "\t# Skip pointwise by setting num_outputs=Non\n", - "\tnet = slim.conv2d(inputs, n_filters, kernel_size=[1, 1], activation_fn=None)\n", - "\tnet = slim.batch_norm(net, fused=True)\n", - "\tnet = tf.nn.relu(net)\n", - "\treturn net" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "aMUbfSp5-Oof", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def DepthwiseSeparableConvBlock(inputs, n_filters, kernel_size=[3, 3]):\n", - "\n", - "\t# Skip pointwise by setting num_outputs=None\n", - "\tnet = slim.separable_convolution2d(inputs, num_outputs=None, depth_multiplier=1, kernel_size=[3, 3], activation_fn=None)\n", - "\n", - "\tnet = slim.batch_norm(net, fused=True)\n", - "\tnet = tf.nn.relu(net)\n", - "\tnet = slim.conv2d(net, n_filters, kernel_size=[1, 1], activation_fn=None)\n", - "\tnet = slim.batch_norm(net, fused=True)\n", - "\tnet = tf.nn.relu(net)\n", - "\treturn net\n" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "6SOwKiuD-Qpf", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def conv_transpose_block(inputs, n_filters, kernel_size=[3, 3]):\n", - "\n", - "\tnet = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)\n", - "\tnet = tf.nn.relu(slim.batch_norm(net))\n", - "\treturn net" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "QfaEJ4Mn-bCm", - 
"colab_type": "code", - "colab": {} - }, - "source": [ - "\n", - "def build_ynet():\n", - "\n", - "\n", - " \n", - "\t# Downsampling path #\n", - "\t\n", - " inputs=Input((256,256,3))\n", - " num_classes = 4\n", - " net = ConvBlock(inputs, 64)\n", - " net = DepthwiseSeparableConvBlock(net, 64)\n", - " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", - " skip_1 = net\n", - "\n", - " net = DepthwiseSeparableConvBlock(net, 128)\n", - " net = DepthwiseSeparableConvBlock(net, 128)\n", - " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", - " skip_2 = net\n", - "\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", - " skip_3 = net\n", - "\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", - " skip_4 = net\n", - "\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", - "\n", - "\n", - "\t\n", - "\t# Upsampling path #\n", - "\t\n", - " net = conv_transpose_block(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = tf.add(net, skip_4)\n", - "\n", - " net = conv_transpose_block(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 512)\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = tf.add(net, skip_3)\n", - "\n", - " net = conv_transpose_block(net, 256)\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = DepthwiseSeparableConvBlock(net, 256)\n", - " net = DepthwiseSeparableConvBlock(net, 128)\n", - " net = tf.add(net, skip_2)\n", - "\n", - " net = conv_transpose_block(net, 128)\n", - " net = DepthwiseSeparableConvBlock(net, 128)\n", - " net = DepthwiseSeparableConvBlock(net, 64)\n", - " net = tf.add(net, skip_1)\n", - "\n", - " net = conv_transpose_block(net, 64)\n", - " net = DepthwiseSeparableConvBlock(net, 64)\n", - " net = DepthwiseSeparableConvBlock(net, 64)\n", - "\n", - "\t\n", - "\t# Softmax #\n", - "\t\n", - " net = tf.nn.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')\n", - " return net" - ], - "execution_count": null, - "outputs": [] - }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "b9jdznlx_6Pd" + }, + "outputs": [], + "source": [ + "import glob\n", + "from PIL import Image\n", + "\n", + "PATCH_SIZE = 256\n", + "\n", + "# This is the folder you'll find after extracting the dataset.\n", + "train_folder = './data/images'\n", + "labels = glob.glob(train_folder + '/*.tif')\n", + "labels.sort()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "GbQmmfIv_89L" + }, + "outputs": [], + "source": [ + "for item in labels:\n", + " try:\n", + " with Image.open(item) as img:\n", + " extractor = PatchExtractor(img=img, patch_size=PATCH_SIZE, stride=256)\n", + " patches = extractor.extract_patches()\n", + " count = 0\n", + " for p in patches:\n", + " count += 
1\n", + "# print('./data/images/patches_i/' + str(count) + '_' + item.split('/')[-1][:-3] + 'png')\n", + " p.save('./data/images/patches_i/' + str(count) + '_' + item.split('/')[-1][:-3] + 'png')\n", + " except Exception as error:\n", + " print('error with', item, error)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tJOZuT_1__X7" + }, + "outputs": [], + "source": [ + "labels = glob.glob('./data/br_masks/*.png')\n", + "labels.sort()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "dS3mNinvABQC" + }, + "outputs": [], + "source": [ + "for item in labels:\n", + " try:\n", + " with Image.open(item) as img:\n", + " extractor = PatchExtractor(img=img, patch_size=PATCH_SIZE, stride=256)\n", + " patches = extractor.extract_patches()\n", + " count = 0\n", + " for p in patches:\n", + " count += 1\n", + " # print('./train/' + value + '/' + str(count) + '_' + key.split('/')[-1])\n", + " p.save('./data/br_masks/patches_m/' + str(count) + '_' + item.split('/')[-1])\n", + " except Exception as error:\n", + " print('error with', item, error)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "eSzM4cIhADqQ" + }, + "outputs": [], + "source": [ + "images = [item for item in glob.glob('data/images/patches_i/*')]\n", + "masks = [item for item in glob.glob('data/br_masks/patches_m/*')]\n", + "images.sort()\n", + "masks.sort()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "3_3IMuP7_Df7" + }, + "outputs": [ { - "cell_type": "code", - "metadata": { - "id": "lULQA9OB-kd1", - "colab_type": "code", - "colab": {} - }, - "source": [ - "model = build_ynet()" - ], - "execution_count": null, - "outputs": [] - }, + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + } + ], + "source": [ + "import sys\n", + "import os\n", + "import numpy as np\n", + "from keras.utils import Progbar\n", + "from skimage.io import imread, imshow, imread_collection, concatenate_images\n", + "from skimage.transform import resize\n", + "from skimage.morphology import label\n", + "from itertools import chain\n", + "from skimage.io import imread, imshow, imread_collection, concatenate_images\n", + "from skimage.transform import resize\n", + "from skimage.morphology import label\n", + "from tensorflow.keras.models import Model, load_model\n", + "from tensorflow.keras.layers import Input\n", + "# Function read train images and mask return as nump array\n", + "def read_train_data(IMG_WIDTH=256,IMG_HEIGHT=256,IMG_CHANNELS=3):\n", + " X_train = np.zeros((len(images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\n", + " Y_train = np.zeros((len(masks), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n", + " print('Getting and resizing train images and masks ... 
')\n", + " sys.stdout.flush()\n", + " if os.path.isfile(\"train_img.npy\") and os.path.isfile(\"train_mask.npy\"):\n", + " print(\"Train file loaded from memory\")\n", + " X_train = np.load(\"train_img.npy\")\n", + " Y_train = np.load(\"train_mask.npy\")\n", + " return X_train,Y_train\n", + " a = Progbar(len(images))\n", + " for n, p in enumerate(zip(images, masks)):\n", + " path = p[0]\n", + " img = imread(path)[:,:,:IMG_CHANNELS]\n", + " img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\n", + " X_train[n] = img\n", + " mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\n", + " mask_ = imread(p[1])\n", + " mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', \n", + " preserve_range=True), axis=-1)\n", + " mask = np.maximum(mask, mask_)\n", + " Y_train[n] = mask\n", + " a.update(n)\n", + " np.save(\"train_img\",X_train)\n", + " np.save(\"train_mask\",Y_train)\n", + " return X_train,Y_train" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "w2R9ukO6_G9W" + }, + "outputs": [ { - "cell_type": "code", - "metadata": { - "id": "GHwSOHO2-zy1", - "colab_type": "code", - "colab": {} - }, - "source": [ - "model.fit(train_img,train_mask,batch_size=50,epochs=5)" - ], - "execution_count": null, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "Getting and resizing train images and masks ... \n", + "12815/12816 [============================>.] - ETA: 0s" + ] + } + ], + "source": [ + "train_img,train_mask = read_train_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "LbtzgcSf-GwT" + }, + "outputs": [], + "source": [ + "import os,time,cv2\n", + "import tensorflow as tf\n", + "import numpy as np\n", + "import tf_slim as slim" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "KHEa_fzD-MGG" + }, + "outputs": [], + "source": [ + "def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):\n", + "\t# Skip pointwise by setting num_outputs=Non\n", + "\tnet = slim.conv2d(inputs, n_filters, kernel_size=[1, 1], activation_fn=None)\n", + "\tnet = slim.batch_norm(net, fused=True)\n", + "\tnet = tf.nn.relu(net)\n", + "\treturn net" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "aMUbfSp5-Oof" + }, + "outputs": [], + "source": [ + "def DepthwiseSeparableConvBlock(inputs, n_filters, kernel_size=[3, 3]):\n", + "\n", + "\t# Skip pointwise by setting num_outputs=None\n", + "\tnet = slim.separable_convolution2d(inputs, num_outputs=None, depth_multiplier=1, kernel_size=[3, 3], activation_fn=None)\n", + "\n", + "\tnet = slim.batch_norm(net, fused=True)\n", + "\tnet = tf.nn.relu(net)\n", + "\tnet = slim.conv2d(net, n_filters, kernel_size=[1, 1], activation_fn=None)\n", + "\tnet = slim.batch_norm(net, fused=True)\n", + "\tnet = tf.nn.relu(net)\n", + "\treturn net\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "6SOwKiuD-Qpf" + }, + "outputs": [], + "source": [ + "def conv_transpose_block(inputs, n_filters, kernel_size=[3, 3]):\n", + "\n", + "\tnet = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)\n", + "\tnet = tf.nn.relu(slim.batch_norm(net))\n", + "\treturn net" + ] + }, + { + "cell_type": "code", + 
"execution_count": 17, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "QfaEJ4Mn-bCm" + }, + "outputs": [], + "source": [ + "\n", + "def build_ynet():\n", + "\n", + "\n", + " \n", + "\t# Downsampling path #\n", + "\t\n", + " inputs=Input((256,256,3))\n", + " num_classes = 4\n", + " net = ConvBlock(inputs, 64)\n", + " net = DepthwiseSeparableConvBlock(net, 64)\n", + " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", + " skip_1 = net\n", + "\n", + " net = DepthwiseSeparableConvBlock(net, 128)\n", + " net = DepthwiseSeparableConvBlock(net, 128)\n", + " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", + " skip_2 = net\n", + "\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", + " skip_3 = net\n", + "\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", + " skip_4 = net\n", + "\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = tf.nn.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')\n", + "\n", + "\n", + "\t\n", + "\t# Upsampling path #\n", + "\t\n", + " net = conv_transpose_block(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = tf.add(net, skip_4)\n", + "\n", + " net = conv_transpose_block(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 512)\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = tf.add(net, skip_3)\n", + "\n", + " net = conv_transpose_block(net, 256)\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = DepthwiseSeparableConvBlock(net, 256)\n", + " net = DepthwiseSeparableConvBlock(net, 128)\n", + " net = tf.add(net, skip_2)\n", + "\n", + " net = conv_transpose_block(net, 128)\n", + " net = DepthwiseSeparableConvBlock(net, 128)\n", + " net = DepthwiseSeparableConvBlock(net, 64)\n", + " net = tf.add(net, skip_1)\n", + "\n", + " net = conv_transpose_block(net, 64)\n", + " net = DepthwiseSeparableConvBlock(net, 64)\n", + " net = DepthwiseSeparableConvBlock(net, 64)\n", + "\n", + "\t\n", + "\t# Softmax #\n", + "\t\n", + " net = tf.nn.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')\n", + " return net" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 }, + "colab_type": "code", + "id": "Xjsw3qqJ09rB", + "outputId": "3a1075a6-2e39-4594-c491-39cc2835ac2a" + }, + "outputs": [ { - "cell_type": "code", - "metadata": { - "id": "4faTuAdfAfuM", - "colab_type": "code", - "colab": {} - }, - "source": [ - "model.save('model.hdf5')" - ], - "execution_count": null, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "Model: \"model_2\"\n", + "__________________________________________________________________________________________________\n", + "Layer (type) Output Shape Param # Connected to \n", + "==================================================================================================\n", + "input_2 
(InputLayer) (None, 256, 256, 3) 0 \n", + "__________________________________________________________________________________________________\n", + "conv2d_20 (Conv2D) (None, 256, 256, 64) 1792 input_2[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_10 (Dropout) (None, 256, 256, 64) 0 conv2d_20[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_21 (Conv2D) (None, 256, 256, 32) 18464 dropout_10[0][0] \n", + "__________________________________________________________________________________________________\n", + "max_pooling2d_5 (MaxPooling2D) (None, 128, 128, 32) 0 conv2d_21[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_22 (Conv2D) (None, 128, 128, 32) 9248 max_pooling2d_5[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_11 (Dropout) (None, 128, 128, 32) 0 conv2d_22[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_23 (Conv2D) (None, 128, 128, 32) 9248 dropout_11[0][0] \n", + "__________________________________________________________________________________________________\n", + "max_pooling2d_6 (MaxPooling2D) (None, 64, 64, 32) 0 conv2d_23[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_24 (Conv2D) (None, 64, 64, 64) 18496 max_pooling2d_6[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_12 (Dropout) (None, 64, 64, 64) 0 conv2d_24[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_25 (Conv2D) (None, 64, 64, 64) 36928 dropout_12[0][0] \n", + "__________________________________________________________________________________________________\n", + "max_pooling2d_7 (MaxPooling2D) (None, 32, 32, 64) 0 conv2d_25[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_26 (Conv2D) (None, 32, 32, 128) 73856 max_pooling2d_7[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_13 (Dropout) (None, 32, 32, 128) 0 conv2d_26[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_27 (Conv2D) (None, 32, 32, 128) 147584 dropout_13[0][0] \n", + "__________________________________________________________________________________________________\n", + "max_pooling2d_8 (MaxPooling2D) (None, 16, 16, 128) 0 conv2d_27[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_28 (Conv2D) (None, 16, 16, 256) 295168 max_pooling2d_8[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_14 (Dropout) (None, 16, 16, 256) 0 conv2d_28[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_29 (Conv2D) (None, 16, 16, 256) 590080 dropout_14[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_transpose_5 
(Conv2DTrans (None, 32, 32, 128) 131200 conv2d_29[0][0] \n", + "__________________________________________________________________________________________________\n", + "concatenate_5 (Concatenate) (None, 32, 32, 256) 0 conv2d_transpose_5[0][0] \n", + " conv2d_27[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_30 (Conv2D) (None, 32, 32, 128) 295040 concatenate_5[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_15 (Dropout) (None, 32, 32, 128) 0 conv2d_30[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_31 (Conv2D) (None, 32, 32, 128) 147584 dropout_15[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_transpose_6 (Conv2DTrans (None, 64, 64, 64) 32832 conv2d_31[0][0] \n", + "__________________________________________________________________________________________________\n", + "concatenate_6 (Concatenate) (None, 64, 64, 128) 0 conv2d_transpose_6[0][0] \n", + " conv2d_25[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_32 (Conv2D) (None, 64, 64, 64) 73792 concatenate_6[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_16 (Dropout) (None, 64, 64, 64) 0 conv2d_32[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_33 (Conv2D) (None, 64, 64, 64) 36928 dropout_16[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_transpose_7 (Conv2DTrans (None, 128, 128, 32) 8224 conv2d_33[0][0] \n", + "__________________________________________________________________________________________________\n", + "concatenate_7 (Concatenate) (None, 128, 128, 64) 0 conv2d_transpose_7[0][0] \n", + " conv2d_23[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_34 (Conv2D) (None, 128, 128, 32) 18464 concatenate_7[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_17 (Dropout) (None, 128, 128, 32) 0 conv2d_34[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_35 (Conv2D) (None, 128, 128, 32) 9248 dropout_17[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_transpose_8 (Conv2DTrans (None, 256, 256, 16) 2064 conv2d_35[0][0] \n", + "__________________________________________________________________________________________________\n", + "concatenate_8 (Concatenate) (None, 256, 256, 48) 0 conv2d_transpose_8[0][0] \n", + " conv2d_21[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_36 (Conv2D) (None, 256, 256, 16) 6928 concatenate_8[0][0] \n", + "__________________________________________________________________________________________________\n", + "dropout_18 (Dropout) (None, 256, 256, 16) 0 conv2d_36[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_37 (Conv2D) (None, 
256, 256, 16) 2320 dropout_18[0][0] \n", + "__________________________________________________________________________________________________\n", + "conv2d_38 (Conv2D) (None, 256, 256, 1) 17 conv2d_37[0][0] \n", + "==================================================================================================\n", + "Total params: 1,965,505\n", + "Trainable params: 1,965,505\n", + "Non-trainable params: 0\n", + "__________________________________________________________________________________________________\n" + ] } - ] -} \ No newline at end of file + ], + "source": [ + "model = build_ynet()\n", + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "4faTuAdfAfuM" + }, + "outputs": [], + "source": [ + "model.fit(train_img,train_mask,batch_size=50,epochs=5)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "YNet.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}
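
A note on the training cell: the committed notebook calls model.fit on the object returned by build_ynet(), but that function returns a raw tensor rather than a tf.keras Model, and no compile step appears anywhere, so the fit cell cannot run as committed. Below is a minimal sketch of the missing glue, assuming build_ynet is adapted to end with return Model(inputs=inputs, outputs=net) and that num_classes is set to 1 to match the single-channel boolean masks (both assumptions, not taken from the patch):

    import tensorflow as tf

    # Assumes build_ynet() has been adapted to return
    # Model(inputs=inputs, outputs=net) with a single-channel logit output.
    model = build_ynet()

    # A Keras model must be compiled before fit(); the committed cell skips this.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),  # assumed: binary masks, logit output
        metrics=['accuracy'],
    )

    model.fit(train_img, train_mask, batch_size=50, epochs=5)
    model.save('model.hdf5')  # the pre-patch notebook persisted the model this way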