Update the siamese contrastive example for Keras 3.
hertschuh committed Nov 9, 2023
1 parent 26caa15 commit aa36712
Showing 10 changed files with 140 additions and 126 deletions.
(Diffs for the other changed files, including one binary file, are not displayed.)
51 changes: 25 additions & 26 deletions examples/vision/ipynb/siamese_contrastive.ipynb
@@ -51,9 +51,8 @@
 "source": [
 "import random\n",
 "import numpy as np\n",
-"import tensorflow as tf\n",
-"from tensorflow import keras\n",
-"from tensorflow.keras import layers\n",
+"import keras\n",
+"from keras import ops\n",
 "import matplotlib.pyplot as plt"
 ]
 },
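The pattern behind this commit: backend-specific tf.* calls become calls into the backend-agnostic keras.ops namespace, which follows NumPy naming, so the notebook can run on TensorFlow, JAX, or PyTorch. A minimal sketch of the renaming (the array values are illustrative):

import numpy as np
from keras import ops

x = np.arange(6, dtype="float32").reshape(2, 3)
print(ops.sum(x, axis=1))                     # replaces tf.math.reduce_sum
print(ops.mean(x))                            # replaces tf.math.reduce_mean
print(ops.concatenate([x, x], axis=1).shape)  # replaces tf.concat -> (2, 6)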
@@ -350,14 +349,13 @@
 "    # Plot the images\n",
 "    fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))\n",
 "    for i in range(to_show):\n",
-"\n",
 "        # If the number of rows is 1, the axes array is one-dimensional\n",
 "        if num_row == 1:\n",
 "            ax = axes[i % num_col]\n",
 "        else:\n",
 "            ax = axes[i // num_col, i % num_col]\n",
 "\n",
-"        ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap=\"gray\")\n",
+"        ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap=\"gray\")\n",
 "        ax.set_axis_off()\n",
 "        if test:\n",
 "            ax.set_title(\"True: {} | Pred: {:.5f}\".format(labels[i], predictions[i][0]))\n",
@@ -453,6 +451,7 @@
 },
 "outputs": [],
 "source": [
+"\n",
 "# Provided two tensors t1 and t2\n",
 "# Euclidean distance = sqrt(sum(square(t1-t2)))\n",
 "def euclidean_distance(vects):\n",
@@ -467,35 +466,37 @@
 "    \"\"\"\n",
 "\n",
 "    x, y = vects\n",
-"    sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)\n",
-"    return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))\n",
+"    sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)\n",
+"    return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))\n",
 "\n",
 "\n",
-"input = layers.Input((28, 28, 1))\n",
-"x = tf.keras.layers.BatchNormalization()(input)\n",
-"x = layers.Conv2D(4, (5, 5), activation=\"tanh\")(x)\n",
-"x = layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
-"x = layers.Conv2D(16, (5, 5), activation=\"tanh\")(x)\n",
-"x = layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
-"x = layers.Flatten()(x)\n",
+"input = keras.layers.Input((28, 28, 1))\n",
+"x = keras.layers.BatchNormalization()(input)\n",
+"x = keras.layers.Conv2D(4, (5, 5), activation=\"tanh\")(x)\n",
+"x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
+"x = keras.layers.Conv2D(16, (5, 5), activation=\"tanh\")(x)\n",
+"x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
+"x = keras.layers.Flatten()(x)\n",
 "\n",
-"x = tf.keras.layers.BatchNormalization()(x)\n",
-"x = layers.Dense(10, activation=\"tanh\")(x)\n",
+"x = keras.layers.BatchNormalization()(x)\n",
+"x = keras.layers.Dense(10, activation=\"tanh\")(x)\n",
 "embedding_network = keras.Model(input, x)\n",
 "\n",
 "\n",
-"input_1 = layers.Input((28, 28, 1))\n",
-"input_2 = layers.Input((28, 28, 1))\n",
+"input_1 = keras.layers.Input((28, 28, 1))\n",
+"input_2 = keras.layers.Input((28, 28, 1))\n",
 "\n",
 "# As mentioned above, Siamese Network share weights between\n",
 "# tower networks (sister networks). To allow this, we will use\n",
 "# same embedding network for both tower networks.\n",
 "tower_1 = embedding_network(input_1)\n",
 "tower_2 = embedding_network(input_2)\n",
 "\n",
-"merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])\n",
-"normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)\n",
-"output_layer = layers.Dense(1, activation=\"sigmoid\")(normal_layer)\n",
+"merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(\n",
+"    [tower_1, tower_2]\n",
+")\n",
+"normal_layer = keras.layers.BatchNormalization()(merge_layer)\n",
+"output_layer = keras.layers.Dense(1, activation=\"sigmoid\")(normal_layer)\n",
 "siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)\n",
 ""
 ]
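Two things change in this hunk besides the namespace swap: layers are referenced as keras.layers.*, and the Lambda layer now receives an explicit output_shape=(1,), since Keras 3 may not be able to infer the output shape of an arbitrary Python callable. A minimal sketch of the distance function on its own, with made-up embedding sizes:

import numpy as np
import keras
from keras import ops

def euclidean_distance(vects):
    # Per-row sqrt(sum((x - y)^2)), clamped by epsilon for numerical stability
    x, y = vects
    sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
    return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))

x = np.random.rand(4, 10).astype("float32")  # 4 embeddings of size 10
y = np.random.rand(4, 10).astype("float32")
print(euclidean_distance((x, y)).shape)      # (4, 1): one distance per pair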
@@ -543,11 +544,9 @@
 "            A tensor containing contrastive loss as floating point value.\n",
 "        \"\"\"\n",
 "\n",
-"        square_pred = tf.math.square(y_pred)\n",
-"        margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))\n",
-"        return tf.math.reduce_mean(\n",
-"            (1 - y_true) * square_pred + (y_true) * margin_square\n",
-"        )\n",
+"        square_pred = ops.square(y_pred)\n",
+"        margin_square = ops.square(ops.maximum(margin - (y_pred), 0))\n",
+"        return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)\n",
 "\n",
 "    return contrastive_loss\n",
 ""