diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_22_0.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_22_0.png
index 71852eae135..72c526d666e 100644
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_22_0.png and b/examples/vision/img/siamese_contrastive/siamese_contrastive_22_0.png differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_24_0.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_24_0.png
index d794f1418e6..bf6e023e748 100644
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_24_0.png and b/examples/vision/img/siamese_contrastive/siamese_contrastive_24_0.png differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_26_0.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_26_0.png
index 244b09e42bf..c8c9a0d16eb 100644
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_26_0.png and b/examples/vision/img/siamese_contrastive/siamese_contrastive_26_0.png differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_36_0.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_36_0.png
index ed314b54e08..c567a393ea3 100644
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_36_0.png and b/examples/vision/img/siamese_contrastive/siamese_contrastive_36_0.png differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_36_1.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_36_1.png
index e8174fec9f5..c2c35675dc7 100644
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_36_1.png and b/examples/vision/img/siamese_contrastive/siamese_contrastive_36_1.png differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_40_0.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_40_0.png
deleted file mode 100644
index eeb82e9832b..00000000000
Binary files a/examples/vision/img/siamese_contrastive/siamese_contrastive_40_0.png and /dev/null differ
diff --git a/examples/vision/img/siamese_contrastive/siamese_contrastive_40_1.png b/examples/vision/img/siamese_contrastive/siamese_contrastive_40_1.png
new file mode 100644
index 00000000000..e1ae268ede9
Binary files /dev/null and b/examples/vision/img/siamese_contrastive/siamese_contrastive_40_1.png differ
diff --git a/examples/vision/ipynb/siamese_contrastive.ipynb b/examples/vision/ipynb/siamese_contrastive.ipynb
index 29315ab21d6..d38dd8c3719 100644
--- a/examples/vision/ipynb/siamese_contrastive.ipynb
+++ b/examples/vision/ipynb/siamese_contrastive.ipynb
@@ -51,9 +51,8 @@
"source": [
"import random\n",
"import numpy as np\n",
- "import tensorflow as tf\n",
- "from tensorflow import keras\n",
- "from tensorflow.keras import layers\n",
+ "import keras\n",
+ "from keras import ops\n",
"import matplotlib.pyplot as plt"
]
},
@@ -350,14 +349,13 @@
" # Plot the images\n",
" fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))\n",
" for i in range(to_show):\n",
- "\n",
" # If the number of rows is 1, the axes array is one-dimensional\n",
" if num_row == 1:\n",
" ax = axes[i % num_col]\n",
" else:\n",
" ax = axes[i // num_col, i % num_col]\n",
"\n",
- " ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap=\"gray\")\n",
+ " ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap=\"gray\")\n",
" ax.set_axis_off()\n",
" if test:\n",
" ax.set_title(\"True: {} | Pred: {:.5f}\".format(labels[i], predictions[i][0]))\n",
@@ -453,6 +451,7 @@
},
"outputs": [],
"source": [
+ "\n",
"# Provided two tensors t1 and t2\n",
"# Euclidean distance = sqrt(sum(square(t1-t2)))\n",
"def euclidean_distance(vects):\n",
@@ -467,25 +466,25 @@
" \"\"\"\n",
"\n",
" x, y = vects\n",
- " sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)\n",
- " return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))\n",
+ " sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)\n",
+ " return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))\n",
"\n",
"\n",
- "input = layers.Input((28, 28, 1))\n",
- "x = tf.keras.layers.BatchNormalization()(input)\n",
- "x = layers.Conv2D(4, (5, 5), activation=\"tanh\")(x)\n",
- "x = layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
- "x = layers.Conv2D(16, (5, 5), activation=\"tanh\")(x)\n",
- "x = layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
- "x = layers.Flatten()(x)\n",
+ "input = keras.layers.Input((28, 28, 1))\n",
+ "x = keras.layers.BatchNormalization()(input)\n",
+ "x = keras.layers.Conv2D(4, (5, 5), activation=\"tanh\")(x)\n",
+ "x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
+ "x = keras.layers.Conv2D(16, (5, 5), activation=\"tanh\")(x)\n",
+ "x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)\n",
+ "x = keras.layers.Flatten()(x)\n",
"\n",
- "x = tf.keras.layers.BatchNormalization()(x)\n",
- "x = layers.Dense(10, activation=\"tanh\")(x)\n",
+ "x = keras.layers.BatchNormalization()(x)\n",
+ "x = keras.layers.Dense(10, activation=\"tanh\")(x)\n",
"embedding_network = keras.Model(input, x)\n",
"\n",
"\n",
- "input_1 = layers.Input((28, 28, 1))\n",
- "input_2 = layers.Input((28, 28, 1))\n",
+ "input_1 = keras.layers.Input((28, 28, 1))\n",
+ "input_2 = keras.layers.Input((28, 28, 1))\n",
"\n",
"# As mentioned above, Siamese Network share weights between\n",
"# tower networks (sister networks). To allow this, we will use\n",
@@ -493,9 +492,11 @@
"tower_1 = embedding_network(input_1)\n",
"tower_2 = embedding_network(input_2)\n",
"\n",
- "merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])\n",
- "normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)\n",
- "output_layer = layers.Dense(1, activation=\"sigmoid\")(normal_layer)\n",
+ "merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(\n",
+ " [tower_1, tower_2]\n",
+ ")\n",
+ "normal_layer = keras.layers.BatchNormalization()(merge_layer)\n",
+ "output_layer = keras.layers.Dense(1, activation=\"sigmoid\")(normal_layer)\n",
"siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)\n",
""
]
@@ -543,11 +544,9 @@
" A tensor containing contrastive loss as floating point value.\n",
" \"\"\"\n",
"\n",
- " square_pred = tf.math.square(y_pred)\n",
- " margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))\n",
- " return tf.math.reduce_mean(\n",
- " (1 - y_true) * square_pred + (y_true) * margin_square\n",
- " )\n",
+ " square_pred = ops.square(y_pred)\n",
+ " margin_square = ops.square(ops.maximum(margin - (y_pred), 0))\n",
+ " return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)\n",
"\n",
" return contrastive_loss\n",
""
diff --git a/examples/vision/md/siamese_contrastive.md b/examples/vision/md/siamese_contrastive.md
index 61f37d6126f..286194b3f21 100644
--- a/examples/vision/md/siamese_contrastive.md
+++ b/examples/vision/md/siamese_contrastive.md
@@ -29,9 +29,8 @@ the class segmentation of the training inputs.
```python
import random
import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
+import keras
+from keras import ops
import matplotlib.pyplot as plt
```
@@ -58,14 +57,6 @@ x_test = x_test.astype("float32")
```
-
-```
-Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
-11493376/11490434 [==============================] - 1s 0us/step
-11501568/11490434 [==============================] - 1s 0us/step
-
-```
-
---
## Define training and validation sets
@@ -230,14 +221,13 @@ def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False)
# Plot the images
fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))
for i in range(to_show):
-
# If the number of rows is 1, the axes array is one-dimensional
if num_row == 1:
ax = axes[i % num_col]
else:
ax = axes[i // num_col, i % num_col]
- ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
+ ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
@@ -300,6 +290,7 @@ merged output is fed to the final network.
```python
+
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
@@ -314,25 +305,25 @@ def euclidean_distance(vects):
"""
x, y = vects
- sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
- return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
+ sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
+ return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))
-input = layers.Input((28, 28, 1))
-x = tf.keras.layers.BatchNormalization()(input)
-x = layers.Conv2D(4, (5, 5), activation="tanh")(x)
-x = layers.AveragePooling2D(pool_size=(2, 2))(x)
-x = layers.Conv2D(16, (5, 5), activation="tanh")(x)
-x = layers.AveragePooling2D(pool_size=(2, 2))(x)
-x = layers.Flatten()(x)
+input = keras.layers.Input((28, 28, 1))
+x = keras.layers.BatchNormalization()(input)
+x = keras.layers.Conv2D(4, (5, 5), activation="tanh")(x)
+x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = keras.layers.Conv2D(16, (5, 5), activation="tanh")(x)
+x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = keras.layers.Flatten()(x)
-x = tf.keras.layers.BatchNormalization()(x)
-x = layers.Dense(10, activation="tanh")(x)
+x = keras.layers.BatchNormalization()(x)
+x = keras.layers.Dense(10, activation="tanh")(x)
embedding_network = keras.Model(input, x)
-input_1 = layers.Input((28, 28, 1))
-input_2 = layers.Input((28, 28, 1))
+input_1 = keras.layers.Input((28, 28, 1))
+input_2 = keras.layers.Input((28, 28, 1))
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
@@ -340,9 +331,11 @@ input_2 = layers.Input((28, 28, 1))
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
-merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
-normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
-output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
+merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(
+ [tower_1, tower_2]
+)
+normal_layer = keras.layers.BatchNormalization()(merge_layer)
+output_layer = keras.layers.Dense(1, activation="sigmoid")(normal_layer)
siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
```
@@ -378,11 +371,9 @@ def loss(margin=1):
A tensor containing contrastive loss as floating point value.
"""
- square_pred = tf.math.square(y_pred)
- margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))
- return tf.math.reduce_mean(
- (1 - y_true) * square_pred + (y_true) * margin_square
- )
+ square_pred = ops.square(y_pred)
+ margin_square = ops.square(ops.maximum(margin - (y_pred), 0))
+ return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)
return contrastive_loss
@@ -398,35 +389,55 @@ siamese.summary()
```
-
-```
-Model: "model_1"
-__________________________________________________________________________________________________
- Layer (type) Output Shape Param # Connected to
-==================================================================================================
- input_2 (InputLayer) [(None, 28, 28, 1)] 0 []
-
- input_3 (InputLayer) [(None, 28, 28, 1)] 0 []
-
- model (Functional) (None, 10) 5318 ['input_2[0][0]',
- 'input_3[0][0]']
-
- lambda (Lambda) (None, 1) 0 ['model[0][0]',
- 'model[1][0]']
-
- batch_normalization_2 (BatchNo (None, 1) 4 ['lambda[0][0]']
- rmalization)
-
- dense_1 (Dense) (None, 1) 2 ['batch_normalization_2[0][0]']
-
-==================================================================================================
-Total params: 5,324
-Trainable params: 4,808
-Non-trainable params: 516
-__________________________________________________________________________________________________
-```
-
+Model: "functional_3"
+
+
+
+
+
+┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
+┃ Layer (type) ┃ Output Shape ┃ Param # ┃ Connected to ┃
+┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
+│ input_layer_1 │ (None, 28, 28, 1) │ 0 │ - │
+│ (InputLayer) │ │ │ │
+├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
+│ input_layer_2 │ (None, 28, 28, 1) │ 0 │ - │
+│ (InputLayer) │ │ │ │
+├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
+│ functional_1 │ (None, 10) │ 5,318 │ input_layer_1[0][0], │
+│ (Functional) │ │ │ input_layer_2[0][0] │
+├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
+│ lambda (Lambda) │ (None, 1) │ 0 │ functional_1[0][0], │
+│ │ │ │ functional_1[1][0] │
+├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
+│ batch_normalizatio… │ (None, 1) │ 4 │ lambda[0][0] │
+│ (BatchNormalizatio… │ │ │ │
+├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
+│ dense_1 (Dense) │ (None, 1) │ 2 │ batch_normalization… │
+└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
+
+
+
+
+
+ Total params: 5,324 (20.80 KB)
+
+
+
+
+
+ Trainable params: 4,808 (18.78 KB)
+
+
+
+
+
+ Non-trainable params: 516 (2.02 KB)
+
+
+
+
---
## Train the model
@@ -444,25 +455,25 @@ history = siamese.fit(
```
Epoch 1/10
-3750/3750 [==============================] - 32s 8ms/step - loss: 0.0889 - accuracy: 0.8784 - val_loss: 0.0369 - val_accuracy: 0.9520
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 78s 20ms/step - accuracy: 0.7952 - loss: 0.1439 - val_accuracy: 0.8965 - val_loss: 0.0784
Epoch 2/10
-3750/3750 [==============================] - 34s 9ms/step - loss: 0.0522 - accuracy: 0.9308 - val_loss: 0.0299 - val_accuracy: 0.9610
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 73s 20ms/step - accuracy: 0.8629 - loss: 0.1019 - val_accuracy: 0.9112 - val_loss: 0.0674
Epoch 3/10
-3750/3750 [==============================] - 33s 9ms/step - loss: 0.0447 - accuracy: 0.9406 - val_loss: 0.0234 - val_accuracy: 0.9700
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 77s 21ms/step - accuracy: 0.8935 - loss: 0.0796 - val_accuracy: 0.9167 - val_loss: 0.0620
Epoch 4/10
-3750/3750 [==============================] - 34s 9ms/step - loss: 0.0388 - accuracy: 0.9496 - val_loss: 0.0216 - val_accuracy: 0.9719
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 73s 20ms/step - accuracy: 0.9048 - loss: 0.0710 - val_accuracy: 0.8970 - val_loss: 0.0761
Epoch 5/10
-3750/3750 [==============================] - 31s 8ms/step - loss: 0.0372 - accuracy: 0.9517 - val_loss: 0.0178 - val_accuracy: 0.9768
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 74s 20ms/step - accuracy: 0.9122 - loss: 0.0665 - val_accuracy: 0.8840 - val_loss: 0.0850
Epoch 6/10
-3750/3750 [==============================] - 33s 9ms/step - loss: 0.0331 - accuracy: 0.9572 - val_loss: 0.0186 - val_accuracy: 0.9761
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 74s 20ms/step - accuracy: 0.9182 - loss: 0.0618 - val_accuracy: 0.9280 - val_loss: 0.0550
Epoch 7/10
-3750/3750 [==============================] - 34s 9ms/step - loss: 0.0322 - accuracy: 0.9584 - val_loss: 0.0185 - val_accuracy: 0.9766
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 73s 19ms/step - accuracy: 0.9175 - loss: 0.0608 - val_accuracy: 0.7186 - val_loss: 0.2191
Epoch 8/10
-3750/3750 [==============================] - 35s 9ms/step - loss: 0.0309 - accuracy: 0.9598 - val_loss: 0.0167 - val_accuracy: 0.9786
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 73s 19ms/step - accuracy: 0.9178 - loss: 0.0618 - val_accuracy: 0.8560 - val_loss: 0.1063
Epoch 9/10
-3750/3750 [==============================] - 34s 9ms/step - loss: 0.0304 - accuracy: 0.9604 - val_loss: 0.0152 - val_accuracy: 0.9804
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 74s 20ms/step - accuracy: 0.9145 - loss: 0.0646 - val_accuracy: 0.8485 - val_loss: 0.1100
Epoch 10/10
-3750/3750 [==============================] - 31s 8ms/step - loss: 0.0304 - accuracy: 0.9607 - val_loss: 0.0146 - val_accuracy: 0.9815
+ 3750/3750 ━━━━━━━━━━━━━━━━━━━━ 74s 20ms/step - accuracy: 0.9154 - loss: 0.0635 - val_accuracy: 0.8631 - val_loss: 0.1029
```
@@ -524,8 +535,8 @@ print("test loss, test acc:", results)
```
-625/625 [==============================] - 2s 3ms/step - loss: 0.0132 - accuracy: 0.9830
-test loss, test acc: [0.013175321742892265, 0.9830499887466431]
+ 625/625 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8585 - loss: 0.1050
+test loss, test acc: [0.09949221462011337, 0.8666499853134155]
```
@@ -538,9 +549,14 @@ predictions = siamese.predict([x_test_1, x_test_2])
visualize(pairs_test, labels_test, to_show=3, predictions=predictions, test=True)
```
+
+```
+ 625/625 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step
+```
+
-![png](/img/examples/vision/siamese_contrastive/siamese_contrastive_40_0.png)
+![png](/img/examples/vision/siamese_contrastive/siamese_contrastive_40_1.png)
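
Beyond the mechanical `tf.math` to `keras.ops` swap, the one behavioral addition in the model definition is `output_shape=(1,)` on the `Lambda` merge layer, presumably as an explicit hint for Keras 3, which does not always infer the output shape of an arbitrary callable. A minimal sketch of just that merge step, assuming a toy 10-dimensional embedding (the names below are illustrative, not from the patch):

```python
import keras
from keras import ops


def euclidean_distance(vects):
    x, y = vects
    sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
    return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))


emb_a = keras.layers.Input((10,))
emb_b = keras.layers.Input((10,))
# output_shape declares that the Lambda produces a (batch, 1) distance tensor.
distance = keras.layers.Lambda(euclidean_distance, output_shape=(1,))([emb_a, emb_b])
toy_head = keras.Model([emb_a, emb_b], distance)
toy_head.summary()
```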
diff --git a/examples/vision/siamese_contrastive.py b/examples/vision/siamese_contrastive.py
index ca982ecd9a2..8054b44a702 100644
--- a/examples/vision/siamese_contrastive.py
+++ b/examples/vision/siamese_contrastive.py
@@ -26,9 +26,8 @@
import random
import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
+import keras
+from keras import ops
import matplotlib.pyplot as plt
"""
@@ -213,7 +212,7 @@ def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False)
else:
ax = axes[i // num_col, i % num_col]
- ax.imshow(tf.concat([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
+ ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
@@ -268,25 +267,25 @@ def euclidean_distance(vects):
"""
x, y = vects
- sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
- return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
+ sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
+ return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))
-input = layers.Input((28, 28, 1))
-x = tf.keras.layers.BatchNormalization()(input)
-x = layers.Conv2D(4, (5, 5), activation="tanh")(x)
-x = layers.AveragePooling2D(pool_size=(2, 2))(x)
-x = layers.Conv2D(16, (5, 5), activation="tanh")(x)
-x = layers.AveragePooling2D(pool_size=(2, 2))(x)
-x = layers.Flatten()(x)
+input = keras.layers.Input((28, 28, 1))
+x = keras.layers.BatchNormalization()(input)
+x = keras.layers.Conv2D(4, (5, 5), activation="tanh")(x)
+x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = keras.layers.Conv2D(16, (5, 5), activation="tanh")(x)
+x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = keras.layers.Flatten()(x)
-x = tf.keras.layers.BatchNormalization()(x)
-x = layers.Dense(10, activation="tanh")(x)
+x = keras.layers.BatchNormalization()(x)
+x = keras.layers.Dense(10, activation="tanh")(x)
embedding_network = keras.Model(input, x)
-input_1 = layers.Input((28, 28, 1))
-input_2 = layers.Input((28, 28, 1))
+input_1 = keras.layers.Input((28, 28, 1))
+input_2 = keras.layers.Input((28, 28, 1))
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
@@ -294,9 +293,11 @@ def euclidean_distance(vects):
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
-merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
-normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
-output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
+merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(
+ [tower_1, tower_2]
+)
+normal_layer = keras.layers.BatchNormalization()(merge_layer)
+output_layer = keras.layers.Dense(1, activation="sigmoid")(normal_layer)
siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
@@ -330,11 +331,9 @@ def contrastive_loss(y_true, y_pred):
A tensor containing contrastive loss as floating point value.
"""
- square_pred = tf.math.square(y_pred)
- margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))
- return tf.math.reduce_mean(
- (1 - y_true) * square_pred + (y_true) * margin_square
- )
+ square_pred = ops.square(y_pred)
+ margin_square = ops.square(ops.maximum(margin - (y_pred), 0))
+ return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)
return contrastive_loss
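
The `ops`-based contrastive loss is numerically equivalent to the old `tf.math` version and can be checked on a toy batch. A sketch under the same margin-1 default used in the example; the labels and predictions below are illustrative only:

```python
import numpy as np
from keras import ops


def loss(margin=1):
    # As written in the patch: label 0 pulls a pair together (penalizes distance),
    # label 1 pushes it at least `margin` apart.
    def contrastive_loss(y_true, y_pred):
        square_pred = ops.square(y_pred)
        margin_square = ops.square(ops.maximum(margin - y_pred, 0))
        return ops.mean((1 - y_true) * square_pred + y_true * margin_square)

    return contrastive_loss


y_true = ops.convert_to_tensor(np.array([0.0, 1.0], dtype="float32"))
y_pred = ops.convert_to_tensor(np.array([0.2, 0.9], dtype="float32"))
# (1 - 0) * 0.2**2 + 1 * max(1 - 0.9, 0)**2, averaged: (0.04 + 0.01) / 2 = 0.025
print(float(loss(margin=1)(y_true, y_pred)))
```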