From bbf63f8eba38f69fe36323e4c0f7f9bd78033c5f Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Wed, 8 Jan 2025 15:58:59 +0100
Subject: [PATCH 1/4] Bump dependencies and support Python 3.13

---
 .github/workflows/unittest.yml |  2 +-
 pyproject.toml                 | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index d877eedf..55f2995a 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
 
     steps:
     - uses: actions/checkout@v3

diff --git a/pyproject.toml b/pyproject.toml
index 82983def..0e329479 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,20 +3,20 @@ dynamic = ["version"]
 name = "vamb"
 dependencies = [
-    "numpy == 1.26.4",
-    "torch == 2.3.1",
-    "pycoverm == 0.6.0",
+    "numpy == 2.2.1",
+    "torch == 2.5.1",
+    "pycoverm == 0.6.2",
     "networkx == 3.2", # 3.3 drops Python 3.9 support
-    "scikit-learn == 1.5.0",
+    "scikit-learn == 1.6.0",
     "dadaptation == 3.2",
-    "loguru == 0.7.2",
-    "pyhmmer == 0.10.12",
-    "pyrodigal == 3.4.1",
+    "loguru == 0.7.3",
+    "pyhmmer == 0.10.15",
+    "pyrodigal == 3.6.3",
 ]
-# Currently pycoverm does not have binaries for Python > 3.12.
-# The dependency resolver, will not error on Python 3.13, but attempt
+# Currently pycoverm does not have binaries for Python > 3.13.
+# The dependency resolver, will not error on Python 3.14, but attempt
 # to build pycoverm from source, but will not get the deps required for that.
-requires-python = "<3.13,>=3.9.0"
+requires-python = "<3.14,>=3.9.0"
 scripts = {vamb = "vamb.__main__:main"}
 
 [project.optional-dependencies]

From 425ea6d6bfb88c335a4386a497476829d2699e65 Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Wed, 8 Jan 2025 16:03:05 +0100
Subject: [PATCH 2/4] Drop Python 3.9 support

---
 .github/workflows/unittest.yml | 2 +-
 pyproject.toml                 | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 55f2995a..1f25dd62 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.10", "3.11", "3.12", "3.13"]
 
     steps:
     - uses: actions/checkout@v3

diff --git a/pyproject.toml b/pyproject.toml
index 0e329479..4c172792 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ dependencies = [
     "numpy == 2.2.1",
     "torch == 2.5.1",
     "pycoverm == 0.6.2",
-    "networkx == 3.2", # 3.3 drops Python 3.9 support
+    "networkx == 3.4.2",
     "scikit-learn == 1.6.0",
     "dadaptation == 3.2",
     "loguru == 0.7.3",
@@ -16,7 +16,7 @@ dependencies = [
 # Currently pycoverm does not have binaries for Python > 3.13.
 # The dependency resolver, will not error on Python 3.14, but attempt
 # to build pycoverm from source, but will not get the deps required for that.
-requires-python = "<3.14,>=3.9.0"
+requires-python = "<3.14,>=3.10.0"
 scripts = {vamb = "vamb.__main__:main"}
 
 [project.optional-dependencies]
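Editor's note, not part of the patch series: a small, hypothetical sanity check that an installed environment actually matches the pins introduced in patches 1 and 2. The PINS mapping below is simply copied from the updated pyproject.toml; the script itself uses only the standard-library importlib.metadata and is a sketch, not anything shipped with Vamb.

# check_pins.py -- hypothetical helper, not part of the Vamb repository.
from importlib.metadata import PackageNotFoundError, version

# Version pins copied from the updated pyproject.toml above.
PINS = {
    "numpy": "2.2.1",
    "torch": "2.5.1",
    "pycoverm": "0.6.2",
    "networkx": "3.4.2",
    "scikit-learn": "1.6.0",
    "dadaptation": "3.2",
    "loguru": "0.7.3",
    "pyhmmer": "0.10.15",
    "pyrodigal": "3.6.3",
}

for name, pinned in PINS.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (pinned {pinned})")
        continue
    status = "ok" if installed == pinned else "MISMATCH"
    print(f"{name}: installed {installed}, pinned {pinned} -> {status}")

Run in the target environment, it prints one line per pinned package and flags anything missing or mismatched.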
From 0b6971ce06f3846d508b5989120beb347f7b250c Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Wed, 8 Jan 2025 16:25:19 +0100
Subject: [PATCH 3/4] Fixups for Python 3.13

---
 .github/workflows/unittest.yml | 3 ++-
 test/test_vambtools.py         | 4 ++--
 vamb/encode.py                 | 2 +-
 vamb/semisupervised_encode.py  | 2 +-
 vamb/taxvamb_encode.py         | 2 +-
 vamb/vambtools.py              | 2 +-
 6 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 1f25dd62..d1c8e6a6 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -14,7 +14,8 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.10", "3.11", "3.12", "3.13"]
+        # There is a bug in Python 3.13.0 which breaks Vamb's tests, fixed in 3.13.1
+        python-version: ["3.10", "3.11", "3.12", "3.13.1"]
 
     steps:
     - uses: actions/checkout@v3

diff --git a/test/test_vambtools.py b/test/test_vambtools.py
index 6d90ab96..6cd923c1 100644
--- a/test/test_vambtools.py
+++ b/test/test_vambtools.py
@@ -291,10 +291,10 @@ def test_axes(self):
         )
 
     def test_axis_bounds(self):
-        with self.assertRaises(np.AxisError):
+        with self.assertRaises(np.exceptions.AxisError):
             vamb.vambtools.zscore(self.arr, axis=-1)
 
-        with self.assertRaises(np.AxisError):
+        with self.assertRaises(np.exceptions.AxisError):
             vamb.vambtools.zscore(self.arr, axis=2)
 
     def test_integer(self):

diff --git a/vamb/encode.py b/vamb/encode.py
index 0224fef8..10f6fa76 100644
--- a/vamb/encode.py
+++ b/vamb/encode.py
@@ -514,7 +514,7 @@ def load(
         """
 
         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage)
+        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=True)
 
         nsamples = dictionary["nsamples"]
         alpha = dictionary["alpha"]

diff --git a/vamb/semisupervised_encode.py b/vamb/semisupervised_encode.py
index 638ab8f8..d98bfdc9 100644
--- a/vamb/semisupervised_encode.py
+++ b/vamb/semisupervised_encode.py
@@ -1125,7 +1125,7 @@ def load(cls, path, cuda=False, evaluate=True):
         """
 
         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage)
+        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
 
         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]

diff --git a/vamb/taxvamb_encode.py b/vamb/taxvamb_encode.py
index a97a08b5..80970be6 100644
--- a/vamb/taxvamb_encode.py
+++ b/vamb/taxvamb_encode.py
@@ -630,7 +630,7 @@ def load(cls, path, nodes, table_parent, cuda=False, evaluate=True):
         """
 
        # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage)
+        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
 
         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]

diff --git a/vamb/vambtools.py b/vamb/vambtools.py
index b43c683c..85486993 100644
--- a/vamb/vambtools.py
+++ b/vamb/vambtools.py
@@ -239,7 +239,7 @@ def zscore(
     """
 
     if axis is not None and (axis >= array.ndim or axis < 0):
-        raise _np.AxisError(str(axis))
+        raise _np.exceptions.AxisError(str(axis))
 
    if inplace and not _np.issubdtype(array.dtype, _np.floating):
         raise TypeError("Cannot convert a non-float array to zscores")
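Editor's note on the np.AxisError change in patch 3: NumPy 2.x removed the top-level np.AxisError alias, and the exception class now lives in np.exceptions (available since NumPy 1.25). The snippet below is a minimal sketch, independent of the Vamb codebase, showing a compatibility alias plus a quick demonstration that an out-of-bounds axis raises the exception.

import numpy as np

# Compatibility sketch (assumption: code must run on both NumPy 1.x and 2.x).
try:
    AxisError = np.exceptions.AxisError
except AttributeError:  # NumPy < 1.25 has no np.exceptions module
    AxisError = np.AxisError

arr = np.zeros((3, 4))
try:
    arr.sum(axis=5)  # axis 5 is out of bounds for a 2-D array
except AxisError as err:
    print("caught:", err)

Vamb's own code can pin NumPy 2.2.1 and use np.exceptions.AxisError directly, as the patch does; the alias is only needed when older NumPy must still be supported.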
From 1b4a7afed77eda2353093d3b79cb5450acf697c1 Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Wed, 8 Jan 2025 16:26:24 +0100
Subject: [PATCH 4/4] Format

---
 vamb/encode.py                | 4 +++-
 vamb/semisupervised_encode.py | 4 +++-
 vamb/taxvamb_encode.py        | 4 +++-
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/vamb/encode.py b/vamb/encode.py
index 10f6fa76..0a252e94 100644
--- a/vamb/encode.py
+++ b/vamb/encode.py
@@ -514,7 +514,9 @@ def load(
         """
 
         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=True)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=True
+        )
 
         nsamples = dictionary["nsamples"]
         alpha = dictionary["alpha"]

diff --git a/vamb/semisupervised_encode.py b/vamb/semisupervised_encode.py
index d98bfdc9..f106ca79 100644
--- a/vamb/semisupervised_encode.py
+++ b/vamb/semisupervised_encode.py
@@ -1125,7 +1125,9 @@ def load(cls, path, cuda=False, evaluate=True):
         """
 
         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=False
+        )
 
         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]

diff --git a/vamb/taxvamb_encode.py b/vamb/taxvamb_encode.py
index 80970be6..45087f2c 100644
--- a/vamb/taxvamb_encode.py
+++ b/vamb/taxvamb_encode.py
@@ -630,7 +630,9 @@ def load(cls, path, nodes, table_parent, cuda=False, evaluate=True):
         """
 
         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=False
+        )
 
         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]
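Editor's note on the weights_only argument introduced in patch 3 and reformatted here: torch.load(weights_only=True) restricts unpickling to tensors, primitive containers, and similar safe types, which is why it fits the plain tensor-and-number checkpoint loaded in encode.py; the semisupervised and taxonomy models presumably keep weights_only=False because their checkpoints contain objects outside that allowlist. The sketch below is illustrative only, with a made-up checkpoint rather than Vamb's real on-disk format.

import io

import torch

# Hypothetical checkpoint holding only tensors and primitives -- NOT Vamb's format.
buffer = io.BytesIO()
torch.save({"nsamples": 8, "alpha": 0.15, "weight": torch.randn(3, 3)}, buffer)
buffer.seek(0)

# weights_only=True succeeds here because every stored value is on the safe allowlist;
# a checkpoint containing arbitrary Python objects would instead need weights_only=False.
loaded = torch.load(
    buffer,
    map_location=lambda storage, loc: storage,
    weights_only=True,
)
print(loaded["nsamples"], loaded["weight"].shape)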