From b01dfd06b7d3ae0fa98a5ada9ce08b2f4eaf09a2 Mon Sep 17 00:00:00 2001 From: cofri Date: Mon, 24 Jul 2023 14:13:50 +0200 Subject: [PATCH 1/4] chore: latest TF versions in tox environments --- setup.cfg | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 87113135..a02d1ef5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,7 +8,9 @@ per-file-ignores = */__init__.py: F401 [tox:tox] -envlist = py{37,38,39,310}-tf{22,23,24,25,26,27,28,29,latest},py{37,38,39,310}-lint +envlist = + py{37,38,39,310}-tf{22,23,24,25,26,27,28,29,210,211,212,213,latest} + py{37,38,39,310}-lint [testenv] deps = @@ -22,6 +24,11 @@ deps = tf27: tensorflow ~= 2.7.0 tf28: tensorflow ~= 2.8.0 tf29: tensorflow ~= 2.9.0 + tf210: tensorflow ~= 2.10.0 + tf211: tensorflow ~= 2.11.0 + tf212: tensorflow ~= 2.12.0 + tf213: tensorflow ~= 2.13.0 + commands = python -m unittest From ec6714bf30313cf24221df3acb98fbd2c53b22d7 Mon Sep 17 00:00:00 2001 From: danibene <34680344+danibene@users.noreply.github.com> Date: Thu, 20 Jul 2023 12:57:07 -0400 Subject: [PATCH 2/4] fix: support TF 2.13 for conv_utils module conv_utils module has been moved again in TF 2.13, and can be imported as in TF <= 2.5. The error raised is ImportError, which was added to the try/except clause. ModuleNotFoundError is a subclass of ImportError; it is then removed. 
--- deel/lip/layers/convolutional.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deel/lip/layers/convolutional.py b/deel/lip/layers/convolutional.py index 33b80ad8..6e17a70a 100644 --- a/deel/lip/layers/convolutional.py +++ b/deel/lip/layers/convolutional.py @@ -43,8 +43,9 @@ try: from keras.utils import conv_utils # in Keras for TF >= 2.6 -except ModuleNotFoundError: - from tensorflow.python.keras.utils import conv_utils # in TF.python for TF <= 2.5 +except ImportError: + # conv_utils in tf.python for TF <= 2.5 and TF >= 2.13 + from tensorflow.python.keras.utils import conv_utils def _compute_conv_lip_factor(kernel_size, strides, input_shape, data_format): From e743035062e79273c471c8dd0e3ed5f53b2c8a98 Mon Sep 17 00:00:00 2001 From: cofri Date: Mon, 24 Jul 2023 14:24:38 +0200 Subject: [PATCH 3/4] fix(test): deserialization of h5 models w/ custom obj doesn't work in TF2.13 It seems that (de)serialization of h5 models in TF 2.13 has been changed: custom objects, even with "register_keras_serializable", cannot be loaded. Two solutions are possible: - use "with_custom_object_scope()" to load a model with custom objects. - save model in Keras format and not h5. The second option was chosen because this format is now preferred for saving models. 
--- tests/test_layers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_layers.py b/tests/test_layers.py index c378025e..46c3cfc4 100644 --- a/tests/test_layers.py +++ b/tests/test_layers.py @@ -226,8 +226,8 @@ def train_k_lip_model( ) empirical_lip_const = evaluate_lip_const(model=model, x=x, seed=42) # save the model - model_checkpoint_path = os.path.join(logdir, "model.h5") - model.save(model_checkpoint_path, overwrite=True, save_format="h5") + model_checkpoint_path = os.path.join(logdir, "model.keras") + model.save(model_checkpoint_path, overwrite=True) del model K.clear_session() model = load_model(model_checkpoint_path) @@ -1050,7 +1050,7 @@ def test_vanilla_export(self): # Test saving/loading model with tempfile.TemporaryDirectory() as tmpdir: - model_path = os.path.join(tmpdir, "model.h5") + model_path = os.path.join(tmpdir, "model.keras") model.save(model_path) tf.keras.models.load_model(model_path) From 1f91e7fbd336ce5b34cf427391bcfd021629e101 Mon Sep 17 00:00:00 2001 From: cofri Date: Tue, 25 Jul 2023 15:29:08 +0200 Subject: [PATCH 4/4] fix(test): random test was failing. Use a better seed. The power iteration test for Dense was sometimes failing due to a random choice of kernel and initialization vector u, even though np.random.seed(42) was set at the beginning of the file. To ensure a fully deterministic test (deterministic kernel and initialization u), a random generator np.random.default_rng(42) was created and used to create both kernel and u. The results for the Dense test are then deterministic, whether this test file is run alone or as part of the whole unittest suite. Note that this is the only test modified; the other tests of the file pass, so they have not been changed to use deterministic initializations. 
--- tests/test_normalizers.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/test_normalizers.py b/tests/test_normalizers.py index 125b76c5..a193f965 100644 --- a/tests/test_normalizers.py +++ b/tests/test_normalizers.py @@ -15,7 +15,7 @@ ) from deel.lip.utils import _padding_circular -np.random.seed(42) +rng = np.random.default_rng(42) class TestSpectralNorm(unittest.TestCase): @@ -24,11 +24,11 @@ class TestSpectralNorm(unittest.TestCase): def test_spectral_normalization(self): # Dense kernel kernel_shape = (15, 32) - kernel = np.random.normal(size=kernel_shape).astype("float32") + kernel = rng.normal(size=kernel_shape).astype("float32") self._test_kernel(kernel) # Dense kernel projection kernel_shape = (32, 15) - kernel = np.random.normal(size=kernel_shape).astype("float32") + kernel = rng.normal(size=kernel_shape).astype("float32") self._test_kernel(kernel) def _test_kernel(self, kernel): @@ -40,7 +40,8 @@ def _test_kernel(self, kernel): ).numpy() SVmax = np.max(sigmas_svd) - W_bar, _u, sigma = spectral_normalization(kernel, u=None, eps=1e-6) + u = rng.normal(size=(1, kernel.shape[-1])) + W_bar, _u, sigma = spectral_normalization(kernel, u=u, eps=1e-6) # Test sigma is close to the one computed with svd first run @ 1e-1 np.testing.assert_approx_equal( sigma, SVmax, 1, "test failed with kernel_shape " + str(kernel.shape)