From 90e5d5a281fb02c1c4558e9097fc5fb980584321 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Enrique=20Gonz=C3=A1lez=20Paredes?=
Date: Fri, 19 Jan 2024 13:44:39 +0100
Subject: [PATCH] feature[next]: add support for Python 3.11 by fixing typing-related bugs (#1418)

Fixes hidden bugs in `eve.datamodels` and `eve.extended_typing` to support Python 3.11.

Actual bug fixes:
- The previous fix to support the `typing.Any` implementation as a class
  (https://github.com/python/cpython/commit/5a4973e29f2f5c4ee8c086f40325786c62381540)
  didn't work in 3.11.
- Partial concretization of generic datamodels replacing typevars was broken.
- Partial concretization of generic datamodels leaving some parameters as typevars was broken.

Other changes:
- Add Python 3.11 as a supported version.
- Remove dead code in comments.
- Fix the style of some imports to comply with our coding guidelines.
---
 .github/workflows/daily-ci.yml                 |   2 +-
 .github/workflows/test-cartesian-fallback.yml  |   2 +-
 .github/workflows/test-cartesian.yml           |   2 +-
 .github/workflows/test-eve-fallback.yml        |   2 +-
 .github/workflows/test-eve.yml                 |   3 +-
 .github/workflows/test-next-fallback.yml       |   2 +-
 .github/workflows/test-next.yml                |   2 +-
 .github/workflows/test-storage-fallback.yml    |   2 +-
 .github/workflows/test-storage.yml             |   3 +-
 src/gt4py/eve/datamodels/core.py               |  40 ++----
 src/gt4py/eve/extended_typing.py               |  15 ++-
 tests/eve_tests/unit_tests/test_datamodels.py  | 117 ++++++++++--------
 .../unit_tests/test_type_validation.py         |   5 +-
 .../ffront_tests/test_icon_like_scan.py        |  26 ++--
 .../test_single_static_assign.py               |   5 +-
 tox.ini                                        |  35 +++---
 16 files changed, 140 insertions(+), 123 deletions(-)

diff --git a/.github/workflows/daily-ci.yml b/.github/workflows/daily-ci.yml
index 77ba39a361..8631390dbb 100644
--- a/.github/workflows/daily-ci.yml
+++ b/.github/workflows/daily-ci.yml
@@ -14,7 +14,7 @@ jobs:
   daily-ci:
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
         tox-module-factor: ["cartesian", "eve", "next", "storage"]
         os: ["ubuntu-latest"]
         requirements-file: ["requirements-dev.txt", "min-requirements-test.txt", "min-extra-requirements-test.txt"]
diff --git a/.github/workflows/test-cartesian-fallback.yml b/.github/workflows/test-cartesian-fallback.yml
index b2eaead47a..7e9a948e9c 100644
--- a/.github/workflows/test-cartesian-fallback.yml
+++ b/.github/workflows/test-cartesian-fallback.yml
@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
         backends: [internal-cpu, dace-cpu]
 
     steps:
diff --git a/.github/workflows/test-cartesian.yml b/.github/workflows/test-cartesian.yml
index 2c2b97aaa6..ebdc4ce749 100644
--- a/.github/workflows/test-cartesian.yml
+++ b/.github/workflows/test-cartesian.yml
@@ -23,7 +23,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
         backends: [internal-cpu, dace-cpu]
     steps:
       - uses: actions/checkout@v2
diff --git a/.github/workflows/test-eve-fallback.yml b/.github/workflows/test-eve-fallback.yml
index 93dc308a53..fd7ab5452c 100644
--- a/.github/workflows/test-eve-fallback.yml
+++ b/.github/workflows/test-eve-fallback.yml
@@ -17,7 +17,7 @@ jobs:
   test-eve:
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
         os: ["ubuntu-latest"]
 
     runs-on: ${{ matrix.os }}
diff --git a/.github/workflows/test-eve.yml b/.github/workflows/test-eve.yml
index 1322c573db..222b825f38
100644 --- a/.github/workflows/test-eve.yml +++ b/.github/workflows/test-eve.yml @@ -20,7 +20,7 @@ jobs: test-eve: strategy: matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] os: ["ubuntu-latest"] fail-fast: false @@ -68,4 +68,3 @@ jobs: # with: # name: info-py${{ matrix.python-version }}-${{ matrix.os }} # path: info.txt - diff --git a/.github/workflows/test-next-fallback.yml b/.github/workflows/test-next-fallback.yml index 8490a3e393..bdcc061db0 100644 --- a/.github/workflows/test-next-fallback.yml +++ b/.github/workflows/test-next-fallback.yml @@ -15,7 +15,7 @@ jobs: test-next: strategy: matrix: - python-version: ["3.10"] + python-version: ["3.10", "3.11"] tox-env-factor: ["nomesh", "atlas"] os: ["ubuntu-latest"] diff --git a/.github/workflows/test-next.yml b/.github/workflows/test-next.yml index 52f8c25386..4282a22da6 100644 --- a/.github/workflows/test-next.yml +++ b/.github/workflows/test-next.yml @@ -18,7 +18,7 @@ jobs: test-next: strategy: matrix: - python-version: ["3.10"] + python-version: ["3.10", "3.11"] tox-env-factor: ["nomesh", "atlas"] os: ["ubuntu-latest"] fail-fast: false diff --git a/.github/workflows/test-storage-fallback.yml b/.github/workflows/test-storage-fallback.yml index 0cbc735564..99e4923de8 100644 --- a/.github/workflows/test-storage-fallback.yml +++ b/.github/workflows/test-storage-fallback.yml @@ -18,7 +18,7 @@ jobs: test-storage: strategy: matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] backends: [internal-cpu, dace-cpu] os: ["ubuntu-latest"] diff --git a/.github/workflows/test-storage.yml b/.github/workflows/test-storage.yml index 1133353f30..34841ed71c 100644 --- a/.github/workflows/test-storage.yml +++ b/.github/workflows/test-storage.yml @@ -21,7 +21,7 @@ jobs: test-storage: strategy: matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9", "3.10", "3.11"] backends: [internal-cpu, dace-cpu] os: ["ubuntu-latest"] fail-fast: false @@ -70,4 +70,3 @@ jobs: # with: # name: info-py${{ matrix.python-version }}-${{ matrix.os }} # path: info.txt - diff --git a/src/gt4py/eve/datamodels/core.py b/src/gt4py/eve/datamodels/core.py index 5660fdbf76..bc744b3ccc 100644 --- a/src/gt4py/eve/datamodels/core.py +++ b/src/gt4py/eve/datamodels/core.py @@ -883,17 +883,6 @@ def _substitute_typevars( return type_params_map[type_hint], True elif getattr(type_hint, "__parameters__", []): return type_hint[tuple(type_params_map[tp] for tp in type_hint.__parameters__)], True - # TODO(egparedes): WIP fix for partial specialization - # # Type hint is a generic model: replace all the concretized type vars - # noqa: e800 replaced = False - # noqa: e800 new_args = [] - # noqa: e800 for tp in type_hint.__parameters__: - # noqa: e800 if tp in type_params_map: - # noqa: e800 new_args.append(type_params_map[tp]) - # noqa: e800 replaced = True - # noqa: e800 else: - # noqa: e800 new_args.append(type_params_map[tp]) - # noqa: e800 return type_hint[tuple(new_args)], replaced else: return type_hint, False @@ -981,21 +970,14 @@ def __class_getitem__( """ type_args: Tuple[Type] = args if isinstance(args, tuple) else (args,) concrete_cls: Type[DataModelT] = concretize(cls, *type_args) - res = xtyping.StdGenericAliasType(concrete_cls, type_args) - if sys.version_info < (3, 9): - # in Python 3.8, xtyping.StdGenericAliasType (aka typing._GenericAlias) - # does not copy all required `__dict__` entries, so do it manually - for k, v in concrete_cls.__dict__.items(): - if k not in 
res.__dict__: - res.__dict__[k] = v - return res + return concrete_cls return classmethod(__class_getitem__) def _make_type_converter(type_annotation: TypeAnnotation, name: str) -> TypeConverter[_T]: - # TODO(egparedes): if a "typing tree" structure is implemented, refactor this code as a tree traversal. - # + # TODO(egparedes): if a "typing tree" structure is implemented, refactor this code + # as a tree traversal. if xtyping.is_actual_type(type_annotation) and not isinstance(None, type_annotation): assert not xtyping.get_args(type_annotation) assert isinstance(type_annotation, type) @@ -1316,11 +1298,7 @@ def _make_concrete_with_cache( # Replace field definitions with the new actual types for generic fields type_params_map = dict(zip(datamodel_cls.__parameters__, type_args)) model_fields = getattr(datamodel_cls, MODEL_FIELD_DEFINITIONS_ATTR) - new_annotations = { - # TODO(egparedes): ? - # noqa: e800 "__args__": "ClassVar[Tuple[Union[Type, TypeVar], ...]]", - # noqa: e800 "__parameters__": "ClassVar[Tuple[TypeVar, ...]]", - } + new_annotations = {} new_field_c_attrs = {} for field_name, field_type in xtyping.get_type_hints(datamodel_cls).items(): @@ -1353,8 +1331,16 @@ def _make_concrete_with_cache( "__module__": module if module else datamodel_cls.__module__, **new_field_c_attrs, } - concrete_cls = type(class_name, (datamodel_cls,), namespace) + + # Update the tuple of generic parameters in the new class, in case + # this is a partial concretization + assert hasattr(concrete_cls, "__parameters__") + concrete_cls.__parameters__ = tuple( + type_params_map[tp_var] + for tp_var in datamodel_cls.__parameters__ + if isinstance(type_params_map[tp_var], typing.TypeVar) + ) assert concrete_cls.__module__ == module or not module if MODEL_FIELD_DEFINITIONS_ATTR not in concrete_cls.__dict__: diff --git a/src/gt4py/eve/extended_typing.py b/src/gt4py/eve/extended_typing.py index 17462a37ff..3ee447ca6c 100644 --- a/src/gt4py/eve/extended_typing.py +++ b/src/gt4py/eve/extended_typing.py @@ -493,7 +493,7 @@ def _patched_proto_hook(other): # type: ignore[no-untyped-def] if isinstance(_typing.Any, type): # Python >= 3.11 _ArtefactTypes = (*_ArtefactTypes, _typing.Any) -# `Any` is a class since typing_extensions >= 4.4 +# `Any` is a class since typing_extensions >= 4.4 and Python 3.11 if (typing_exts_any := getattr(_typing_extensions, "Any", None)) is not _typing.Any and isinstance( typing_exts_any, type ): @@ -504,11 +504,13 @@ def is_actual_type(obj: Any) -> TypeGuard[Type]: """Check if an object has an actual type and instead of a typing artefact like ``GenericAlias`` or ``Any``. 
This is needed because since Python 3.9: - ``isinstance(types.GenericAlias(), type) is True`` + ``isinstance(types.GenericAlias(), type) is True`` and since Python 3.11: - ``isinstance(typing.Any, type) is True`` + ``isinstance(typing.Any, type) is True`` """ - return isinstance(obj, type) and type(obj) not in _ArtefactTypes + return ( + isinstance(obj, type) and (obj not in _ArtefactTypes) and (type(obj) not in _ArtefactTypes) + ) if hasattr(_typing_extensions, "Any") and _typing.Any is not _typing_extensions.Any: # type: ignore[attr-defined] # _typing_extensions.Any only from >= 4.4 @@ -641,9 +643,12 @@ def get_partial_type_hints( resolved_hints = get_type_hints( # type: ignore[call-arg] # Python 3.8 does not define `include-extras` obj, globalns=globalns, localns=localns, include_extras=include_extras ) - hints.update(resolved_hints) + hints[name] = resolved_hints[name] except NameError as error: if isinstance(hint, str): + # This conversion could be probably skipped in Python versions containing + # the fix applied in bpo-41370. Check: + # https://github.com/python/cpython/commit/b465b606049f6f7dd0711cb031fdaa251818741a#diff-ddb987fca5f5df0c9a2f5521ed687919d70bb3d64eaeb8021f98833a2a716887R344 hints[name] = ForwardRef(hint) elif isinstance(hint, (ForwardRef, _typing.ForwardRef)): hints[name] = hint diff --git a/tests/eve_tests/unit_tests/test_datamodels.py b/tests/eve_tests/unit_tests/test_datamodels.py index 8fa9e02cb6..0abb893dd4 100644 --- a/tests/eve_tests/unit_tests/test_datamodels.py +++ b/tests/eve_tests/unit_tests/test_datamodels.py @@ -15,6 +15,7 @@ from __future__ import annotations import enum +import numbers import types import typing from typing import Set # noqa: F401 # imported but unused (used in exec() context) @@ -1150,66 +1151,80 @@ class PartialGenericModel(datamodels.GenericDataModel, Generic[T]): with pytest.raises(TypeError, match="'PartialGenericModel__int.value'"): PartialGenericModel__int(value=["1"]) - def test_partial_specialization(self): - class PartialGenericModel(datamodels.GenericDataModel, Generic[T, U]): + def test_partial_concretization(self): + class BaseGenericModel(datamodels.GenericDataModel, Generic[T, U]): value: List[Tuple[T, U]] - PartialGenericModel(value=[]) - PartialGenericModel(value=[("value", 3)]) - PartialGenericModel(value=[(1, "value")]) - PartialGenericModel(value=[(-1.0, "value")]) - with pytest.raises(TypeError, match="'PartialGenericModel.value'"): - PartialGenericModel(value=1) - with pytest.raises(TypeError, match="'PartialGenericModel.value'"): - PartialGenericModel(value=(1, 2)) - with pytest.raises(TypeError, match="'PartialGenericModel.value'"): - PartialGenericModel(value=[()]) - with pytest.raises(TypeError, match="'PartialGenericModel.value'"): - PartialGenericModel(value=[(1,)]) + assert len(BaseGenericModel.__parameters__) == 2 + + BaseGenericModel(value=[]) + BaseGenericModel(value=[("value", 3)]) + BaseGenericModel(value=[(1, "value")]) + BaseGenericModel(value=[(-1.0, "value")]) + with pytest.raises(TypeError, match="'BaseGenericModel.value'"): + BaseGenericModel(value=1) + with pytest.raises(TypeError, match="'BaseGenericModel.value'"): + BaseGenericModel(value=(1, 2)) + with pytest.raises(TypeError, match="'BaseGenericModel.value'"): + BaseGenericModel(value=[()]) + with pytest.raises(TypeError, match="'BaseGenericModel.value'"): + BaseGenericModel(value=[(1,)]) + + PartiallyConcretizedGenericModel = BaseGenericModel[int, U] + + assert len(PartiallyConcretizedGenericModel.__parameters__) == 1 + + 
PartiallyConcretizedGenericModel(value=[]) + PartiallyConcretizedGenericModel(value=[(1, 2)]) + PartiallyConcretizedGenericModel(value=[(1, "value")]) + PartiallyConcretizedGenericModel(value=[(1, (11, 12))]) + with pytest.raises(TypeError, match=".value'"): + PartiallyConcretizedGenericModel(value=1) + with pytest.raises(TypeError, match=".value'"): + PartiallyConcretizedGenericModel(value=(1, 2)) + with pytest.raises(TypeError, match=".value'"): + PartiallyConcretizedGenericModel(value=[1.0]) + with pytest.raises(TypeError, match=".value'"): + PartiallyConcretizedGenericModel(value=["1"]) - print(f"{PartialGenericModel.__parameters__=}") - print(f"{hasattr(PartialGenericModel ,'__args__')=}") + FullyConcretizedGenericModel = PartiallyConcretizedGenericModel[str] - PartiallySpecializedGenericModel = PartialGenericModel[int, U] - print(f"{PartiallySpecializedGenericModel.__datamodel_fields__=}") - print(f"{PartiallySpecializedGenericModel.__parameters__=}") - print(f"{PartiallySpecializedGenericModel.__args__=}") + assert len(FullyConcretizedGenericModel.__parameters__) == 0 - PartiallySpecializedGenericModel(value=[]) - PartiallySpecializedGenericModel(value=[(1, 2)]) - PartiallySpecializedGenericModel(value=[(1, "value")]) - PartiallySpecializedGenericModel(value=[(1, (11, 12))]) + FullyConcretizedGenericModel(value=[]) + FullyConcretizedGenericModel(value=[(1, "value")]) + with pytest.raises(TypeError, match=".value'"): + FullyConcretizedGenericModel(value=1) + with pytest.raises(TypeError, match=".value'"): + FullyConcretizedGenericModel(value=(1, 2)) with pytest.raises(TypeError, match=".value'"): - PartiallySpecializedGenericModel(value=1) + FullyConcretizedGenericModel(value=[1.0]) with pytest.raises(TypeError, match=".value'"): - PartiallySpecializedGenericModel(value=(1, 2)) + FullyConcretizedGenericModel(value=["1"]) with pytest.raises(TypeError, match=".value'"): - PartiallySpecializedGenericModel(value=[1.0]) + FullyConcretizedGenericModel(value=1) with pytest.raises(TypeError, match=".value'"): - PartiallySpecializedGenericModel(value=["1"]) - - # TODO(egparedes): after fixing partial nested datamodel specialization - # noqa: e800 FullySpecializedGenericModel = PartiallySpecializedGenericModel[str] - # noqa: e800 print(f"{FullySpecializedGenericModel.__datamodel_fields__=}") - # noqa: e800 print(f"{FullySpecializedGenericModel.__parameters__=}") - # noqa: e800 print(f"{FullySpecializedGenericModel.__args__=}") - - # noqa: e800 FullySpecializedGenericModel(value=[]) - # noqa: e800 FullySpecializedGenericModel(value=[(1, "value")]) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=1) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=(1, 2)) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=[1.0]) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=["1"]) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=1) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=[(1, 2)]) - # noqa: e800 with pytest.raises(TypeError, match=".value'"): - # noqa: e800 FullySpecializedGenericModel(value=[(1, (11, 12))]) + FullyConcretizedGenericModel(value=[(1, 2)]) + with pytest.raises(TypeError, match=".value'"): + 
FullyConcretizedGenericModel(value=[(1, (11, 12))]) + + def test_partial_concretization_with_typevar(self): + class PartialGenericModel(datamodels.GenericDataModel, Generic[T]): + a: T + values: List[T] + + B = TypeVar("B", bound=numbers.Number) + PartiallyConcretizedGenericModel = PartialGenericModel[B] + + PartiallyConcretizedGenericModel(a=1, values=[2, 3]) + PartiallyConcretizedGenericModel(a=-1.32, values=[2.2, 3j]) + + with pytest.raises(TypeError, match=".a'"): + PartiallyConcretizedGenericModel(a="1", values=[2, 3]) + with pytest.raises(TypeError, match=".values'"): + PartiallyConcretizedGenericModel(a=1, values=[1, "2"]) + with pytest.raises(TypeError, match=".values'"): + PartiallyConcretizedGenericModel(a=1, values=(1, 2)) # Reuse sample_type_data from test_field_type_hint @pytest.mark.parametrize(["type_hint", "valid_values", "wrong_values"], SAMPLE_TYPE_DATA) diff --git a/tests/eve_tests/unit_tests/test_type_validation.py b/tests/eve_tests/unit_tests/test_type_validation.py index 70ef033ff0..d9977f0d3a 100644 --- a/tests/eve_tests/unit_tests/test_type_validation.py +++ b/tests/eve_tests/unit_tests/test_type_validation.py @@ -28,6 +28,7 @@ ) from gt4py.eve.extended_typing import ( Any, + Callable, Dict, Final, ForwardRef, @@ -41,8 +42,8 @@ ) -VALIDATORS: Final = [type_val.simple_type_validator] -FACTORIES: Final = [type_val.simple_type_validator_factory] +VALIDATORS: Final[list[Callable]] = [type_val.simple_type_validator] +FACTORIES: Final[list[Callable]] = [type_val.simple_type_validator_factory] class SampleEnum(enum.Enum): diff --git a/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_icon_like_scan.py b/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_icon_like_scan.py index 130f6bd29c..5bd255f80f 100644 --- a/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_icon_like_scan.py +++ b/tests/next_tests/integration_tests/multi_feature_tests/ffront_tests/test_icon_like_scan.py @@ -12,7 +12,7 @@ # # SPDX-License-Identifier: GPL-3.0-or-later -from dataclasses import dataclass +import dataclasses import numpy as np import pytest @@ -201,22 +201,26 @@ def test_setup(fieldview_backend): grid_type=common.GridType.UNSTRUCTURED, ) - @dataclass(frozen=True) + @dataclasses.dataclass(frozen=True) class setup: - case: cases.Case = test_case - cell_size = case.default_sizes[Cell] - k_size = case.default_sizes[KDim] - z_alpha = case.as_field( + case: cases.Case = dataclasses.field(default_factory=lambda: test_case) + cell_size = test_case.default_sizes[Cell] + k_size = test_case.default_sizes[KDim] + z_alpha = test_case.as_field( [Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size + 1)) ) - z_beta = case.as_field( + z_beta = test_case.as_field( + [Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size)) + ) + z_q = test_case.as_field( + [Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size)) + ) + w = test_case.as_field( [Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size)) ) - z_q = case.as_field([Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size))) - w = case.as_field([Cell, KDim], np.random.default_rng().uniform(size=(cell_size, k_size))) z_q_ref, w_ref = reference(z_alpha.ndarray, z_beta.ndarray, z_q.ndarray, w.ndarray) - dummy = case.as_field([Cell, KDim], np.zeros((cell_size, k_size), dtype=bool)) - z_q_out = case.as_field([Cell, KDim], np.zeros((cell_size, k_size))) + dummy = test_case.as_field([Cell, KDim], np.zeros((cell_size, 
k_size), dtype=bool)) + z_q_out = test_case.as_field([Cell, KDim], np.zeros((cell_size, k_size))) return setup() diff --git a/tests/next_tests/unit_tests/ffront_tests/ast_passes_tests/test_single_static_assign.py b/tests/next_tests/unit_tests/ffront_tests/ast_passes_tests/test_single_static_assign.py index 052f272d22..ea1cdb82a6 100644 --- a/tests/next_tests/unit_tests/ffront_tests/ast_passes_tests/test_single_static_assign.py +++ b/tests/next_tests/unit_tests/ffront_tests/ast_passes_tests/test_single_static_assign.py @@ -108,7 +108,10 @@ def test_unpacking_swap(): lines = ast.unparse(ssa_ast).split("\n") assert lines[0] == f"a{SEP}0 = 5" assert lines[1] == f"b{SEP}0 = 1" - assert lines[2] == f"(b{SEP}1, a{SEP}1) = (a{SEP}0, b{SEP}0)" + assert lines[2] in [ + f"(b{SEP}1, a{SEP}1) = (a{SEP}0, b{SEP}0)", + f"b{SEP}1, a{SEP}1 = (a{SEP}0, b{SEP}0)", + ] # unparse produces different parentheses in different Python versions def test_annotated_assign(): diff --git a/tox.ini b/tox.ini index 44dc912c8a..817f721f71 100644 --- a/tox.ini +++ b/tox.ini @@ -11,21 +11,24 @@ envlist = # docs labels = test-cartesian-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu + cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ + cartesian-py311-dace-cpu - test-eve-cpu = eve-py38, eve-py39, eve-py310 + test-eve-cpu = eve-py38, eve-py39, eve-py310, eve-py311 - test-next-cpu = next-py310-nomesh, next-py310-atlas + test-next-cpu = next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas test-storage-cpu = storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, \ - storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu + storage-py311-internal-cpu, storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, \ + storage-py311-dace-cpu test-cpu = cartesian-py38-internal-cpu, cartesian-py39-internal-cpu, cartesian-py310-internal-cpu, \ - cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ - eve-py38, eve-py39, eve-py310, \ - next-py310-nomesh, next-py310-atlas, \ - storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, \ - storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu + cartesian-py311-internal-cpu, cartesian-py38-dace-cpu, cartesian-py39-dace-cpu, cartesian-py310-dace-cpu, \ + cartesian-py311-dace-cpu, \ + eve-py38, eve-py39, eve-py310, eve-py311, \ + next-py310-nomesh, next-py311-nomesh, next-py310-atlas, next-py311-atlas, \ + storage-py38-internal-cpu, storage-py39-internal-cpu, storage-py310-internal-cpu, storage-py311-internal-cpu, \ + storage-py38-dace-cpu, storage-py39-dace-cpu, storage-py310-dace-cpu, storage-py311-dace-cpu [testenv] deps = -r {tox_root}{/}{env:ENV_REQUIREMENTS_FILE:requirements-dev.txt} @@ -44,7 +47,7 @@ pass_env = NUM_PROCESSES set_env = PYTHONWARNINGS = {env:PYTHONWARNINGS:ignore:Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*:UserWarning} -[testenv:cartesian-py{38,39,310}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:cartesian-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.cartesian' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH, CXX, CC, OPENMP_CPPFLAGS, OPENMP_LDFLAGS, PIP_USER, PYTHONUSERBASE allowlist_externals = @@ -65,13 +68,13 @@ commands = ; coverage json 
--rcfile=setup.cfg ; coverage html --rcfile=setup.cfg --show-contexts -[testenv:eve-py{38,39,310}] +[testenv:eve-py{38,39,310,311}] description = Run 'gt4py.eve' tests commands = python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} {posargs} tests{/}eve_tests python -m pytest --doctest-modules src{/}gt4py{/}eve -[testenv:next-py{310}-{nomesh,atlas}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:next-py{310,311}-{nomesh,atlas}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.next' tests pass_env = {[testenv]pass_env}, BOOST_ROOT, BOOST_HOME, CUDA_HOME, CUDA_PATH deps = @@ -87,14 +90,14 @@ commands = # atlas-{cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_atlas and requires_gpu" {posargs} tests{/}next_tests # TODO(ricoh): activate when such tests exist pytest --doctest-modules src{/}gt4py{/}next -[testenv:storage-py{38,39,310}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] +[testenv:storage-py{38,39,310,311}-{internal,dace}-{cpu,cuda,cuda11x,cuda12x}] description = Run 'gt4py.storage' tests commands = cpu: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "not requires_gpu" {posargs} tests{/}storage_tests {cuda,cuda11x,cuda12x}: python -m pytest --cache-clear -v -n {env:NUM_PROCESSES:1} -m "requires_gpu" {posargs} tests{/}storage_tests #pytest doctest-modules {posargs} src{/}gt4py{/}storage -[testenv:linters-py{38,39,310}] +[testenv:linters-py{38,39,310,311}] description = Run linters commands = flake8 .{/}src @@ -134,11 +137,13 @@ description = py38: Update requirements for testing a specific python version py39: Update requirements for testing a specific python version py310: Update requirements for testing a specific python version + py311: Update requirements for testing a specific python version base_python = common: py38 py38: py38 py39: py39 py310: py310 + py311: py311 deps = cogapp>=3.3 pip-tools>=6.10 @@ -178,7 +183,7 @@ commands = # Run cog to update .pre-commit-config.yaml with new versions common: cog -r -P .pre-commit-config.yaml -[testenv:dev-py{38,39,310}{-atlas,}] +[testenv:dev-py{38,39,310,311}{-atlas,}] description = Initialize development environment for gt4py deps = -r {tox_root}{/}requirements-dev.txt
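
The changes in src/gt4py/eve/datamodels/core.py make partial concretization of generic datamodels work, as exercised by the new test_partial_concretization test above. The sketch below just mirrors that test; it assumes the import path `from gt4py.eve import datamodels` used elsewhere in the eve test suite:

    from typing import Generic, List, Tuple, TypeVar

    from gt4py.eve import datamodels

    T = TypeVar("T")
    U = TypeVar("U")

    class BaseGenericModel(datamodels.GenericDataModel, Generic[T, U]):
        value: List[Tuple[T, U]]

    # Binding only the first parameter leaves U as an open type variable.
    PartiallyConcretized = BaseGenericModel[int, U]
    assert len(PartiallyConcretized.__parameters__) == 1
    PartiallyConcretized(value=[(1, "anything")])  # only the first tuple element is validated as int

    # Binding the remaining parameter yields a fully concrete model that
    # validates both tuple elements.
    FullyConcretized = PartiallyConcretized[str]
    assert len(FullyConcretized.__parameters__) == 0
    FullyConcretized(value=[(1, "value")])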
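
The is_actual_type fix in src/gt4py/eve/extended_typing.py is needed because typing.Any became a class in Python 3.11, so it now passes a bare isinstance(obj, type) check. The sketch below is an illustrative re-creation of that check, not the exact gt4py implementation (which also covers typing_extensions.Any), and assumes Python 3.9 or newer:

    import sys
    import types
    import typing

    # Typing artefacts that can look like classes even though they are not
    # usable as actual runtime types.
    _ARTEFACT_TYPES: tuple = (types.GenericAlias,)
    if sys.version_info >= (3, 11):
        # Since Python 3.11, typing.Any is implemented as a class.
        _ARTEFACT_TYPES = (*_ARTEFACT_TYPES, typing.Any)

    def is_actual_type(obj) -> bool:
        # Check both the object and its type: list[int] is an *instance* of
        # types.GenericAlias, while typing.Any *is* one of the artefact types.
        return (
            isinstance(obj, type)
            and obj not in _ARTEFACT_TYPES
            and type(obj) not in _ARTEFACT_TYPES
        )

    assert is_actual_type(int)
    assert not is_actual_type(list[int])
    assert not is_actual_type(typing.Any)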
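
The switch to dataclasses.field(default_factory=...) in test_icon_like_scan.py is tied to a Python 3.11 behavior change: dataclasses now reject any unhashable object as a class-level field default, where older versions only rejected list, dict, and set instances. A minimal sketch of the pattern, with Config standing in for the test's cases.Case object (the names here are placeholders, not gt4py APIs):

    import dataclasses

    @dataclasses.dataclass  # eq=True and not frozen, so instances are unhashable
    class Config:
        size: int = 10

    default_config = Config()

    @dataclasses.dataclass(frozen=True)
    class Setup:
        # `config: Config = default_config` raises ValueError on Python 3.11
        # ("mutable default ... is not allowed: use default_factory").
        config: Config = dataclasses.field(default_factory=lambda: default_config)

    assert Setup().config is default_config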
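
The relaxed assertion in test_single_static_assign.py reflects that ast.unparse renders tuple assignment targets with or without parentheses depending on the Python version. A quick way to check what the running interpreter produces (the two expected spellings are the ones accepted by the test):

    import ast

    # Prints either "(b, a) = (a, b)" or "b, a = (a, b)", depending on the version.
    print(ast.unparse(ast.parse("b, a = a, b")))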