[MNT] Changes to no soft dependency tests (#1476)
* remove no soft deps job and expand full no soft deps job

* no PR testing flag

* module soft deps

* is this actually returning an empty list

* import modules now

* clear out module soft dep
MatthewMiddlehurst authored May 17, 2024
1 parent 5831a2d commit a997a64
Showing 5 changed files with 39 additions and 187 deletions.
File renamed without changes.
51 changes: 4 additions & 47 deletions .github/workflows/periodic_tests.yml
@@ -83,7 +83,7 @@ jobs:
# Save cache with the current date (ENV set in numba_cache action)
key: numba-run-notebook-examples-${{ runner.os }}-3.10-${{ env.CURRENT_DATE }}

test-nosoftdeps:
test-no-soft-deps:
runs-on: ubuntu-20.04

steps:
@@ -98,7 +98,7 @@ jobs:
- name: Use numba cache to set env variables but not restore cache
uses: ./.github/actions/numba_cache
with:
cache_name: "test-nosoftdeps"
cache_name: "test-no-soft-deps"
runner_os: ${{ runner.os }}
python_version: "3.10"
restore_cache: "false"
@@ -114,57 +114,14 @@ jobs:
run: python -m pip list

- name: Run tests
run: |
python -m pytest -k 'test_all_estimators' --pyargs aeon.registry
python -m pytest -k 'test_check_estimator_does_not_raise' --pyargs aeon.utils
python -m pytest --pyargs aeon.testing.test_softdeps
- name: Save new cache
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.numba_cache
# Save cache with the current date (ENV set in numba_cache action)
key: numba-test-nosoftdeps-${{ runner.os }}-3.10-${{ env.CURRENT_DATE }}

test-nosoftdeps-full:
runs-on: ubuntu-20.04

steps:
- name: Checkout main
uses: actions/checkout@v4

- name: Setup Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Use numba cache to set env variables but not restore cache
uses: ./.github/actions/numba_cache
with:
cache_name: "test-nosoftdeps-full"
runner_os: ${{ runner.os }}
python_version: "3.10"
restore_cache: "false"

- name: Install aeon and dependencies
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: python -m pip install .[dev]

- name: Show dependencies
run: python -m pip list

- name: Run tests
run: python -m pytest -k 'not TestAll'
run: python -m pytest

- name: Save new cache
uses: actions/cache/save@v4
with:
path: ${{ github.workspace }}/.numba_cache
# Save cache with the current date (ENV set in numba_cache action)
key: numba-test-nosoftdeps-full-${{ runner.os }}-3.10-${{ env.CURRENT_DATE }}
key: numba-test-no-soft-deps-${{ runner.os }}-3.10-${{ env.CURRENT_DATE }}

pytest:
runs-on: ${{ matrix.os }}
44 changes: 3 additions & 41 deletions .github/workflows/pr_pytest.yml
@@ -17,7 +17,7 @@ concurrency:
cancel-in-progress: true

jobs:
test-nosoftdeps:
test-no-soft-deps:
runs-on: ubuntu-20.04

steps:
@@ -32,7 +32,7 @@ jobs:
- name: Use numba cache to set env variables and restore cache
uses: ./.github/actions/numba_cache
with:
cache_name: "test-nosoftdeps"
cache_name: "test-no-soft-deps"
runner_os: ${{ runner.os }}
python_version: "3.10"

@@ -47,46 +47,9 @@ jobs:
run: python -m pip list

- name: Run tests
run: |
python -m pytest -k 'test_all_estimators' --pyargs aeon.registry
python -m pytest -k 'test_check_estimator_does_not_raise' --pyargs aeon.utils
python -m pytest --pyargs aeon.testing.test_softdeps
test-nosoftdeps-full:
needs: test-nosoftdeps
runs-on: ubuntu-20.04

steps:
- name: Checkout main
uses: actions/checkout@v4

- name: Setup Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"

- name: Use numba cache to set env variables and restore cache
uses: ./.github/actions/numba_cache
with:
cache_name: "test-nosoftdeps-full"
runner_os: ${{ runner.os }}
python_version: "3.10"

- name: Install aeon and dependencies
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: python -m pip install .[dev]

- name: Show dependencies
run: python -m pip list

- name: Run tests
run: python -m pytest -k 'not TestAll' --prtesting ${{ github.event_name != 'pull_request' || !contains(github.event.pull_request.labels.*.name, 'full pytest actions') }}
run: python -m pytest -k 'not TestAll'

pytest:
needs: test-nosoftdeps
runs-on: ${{ matrix.os }}

strategy:
@@ -137,7 +100,6 @@ jobs:
# run the code coverage job if a PR has the 'codecov actions' label
if: ${{ github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'codecov actions') }}

needs: test-nosoftdeps
runs-on: ubuntu-20.04

steps:
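The run command in this diff passes a --prtesting flag to pytest, matching the "no PR testing flag" bullet in the commit message. pytest does not know such a flag natively; options like it are registered in a conftest.py. The sketch below is a hypothetical illustration of that wiring only; the option handling and its link to the PR_TESTING switch are assumptions, not aeon's actual conftest.

# conftest.py (hypothetical sketch, not aeon's real conftest)
def pytest_addoption(parser):
    # Register the flag so a workflow can pass "True"/"False" on the command line.
    parser.addoption(
        "--prtesting",
        action="store",
        default=False,
        help="Toggle the reduced pull request testing configuration.",
    )


def pytest_configure(config):
    # Assumed wiring: expose the value as the PR_TESTING switch that
    # aeon.testing.test_config provides and the test_softdeps.py diff below imports.
    from aeon.testing import test_config

    test_config.PR_TESTING = str(config.getoption("--prtesting")).lower() == "true"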
6 changes: 4 additions & 2 deletions aeon/clustering/tests/test_all_clusterers.py
@@ -4,15 +4,17 @@
import pytest

from aeon.registry import all_estimators
from aeon.testing.test_softdeps import soft_deps_installed
from aeon.utils.validation._dependencies import _check_soft_dependencies

ALL_CLUSTERERS = all_estimators("clusterer", return_names=False)


@pytest.mark.parametrize("clst", ALL_CLUSTERERS)
def test_clusterer_tags_consistent(clst):
"""Test all estimators capability tags reflect their capabilities."""
if not soft_deps_installed(clst):
if not _check_soft_dependencies(
clst.get_class_tag("python_dependencies", []), severity="none"
):
return

# Test the tag X_inner_type is consistent with capability:unequal_length
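For context, the change above swaps the removed soft_deps_installed helper for a direct call to _check_soft_dependencies with severity="none", which returns a boolean instead of raising. Below is a minimal sketch of that guard pattern, assuming an estimator class that may carry a "python_dependencies" tag; the helper name is illustrative and not part of aeon.

from aeon.utils.validation._dependencies import _check_soft_dependencies


def _deps_missing(est) -> bool:
    # Illustrative helper: read the estimator's optional dependency tag
    # (e.g. ["tensorflow"]) and check whether everything is importable.
    deps = est.get_class_tag("python_dependencies", [])
    # severity="none" makes the check return False instead of raising
    # ModuleNotFoundError when a dependency is absent.
    return not _check_soft_dependencies(deps, severity="none")

A parametrised test can then return early when _deps_missing(clst) is True, exactly as test_clusterer_tags_consistent does in the hunk above.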
125 changes: 28 additions & 97 deletions aeon/testing/test_softdeps.py
@@ -1,9 +1,7 @@
"""Tests that soft dependencies are handled correctly.
aeon supports a number of soft dependencies which are necessary for using
a certain module but otherwise not necessary.
Adapted from code of mloning for the legacy Azure CI/CD build tools.
a certain module or estimator but otherwise not necessary.
"""

__maintainer__ = []
@@ -14,109 +12,57 @@

import pytest

import aeon
from aeon.registry import all_estimators
from aeon.testing.test_config import EXCLUDE_ESTIMATORS
from aeon.testing.test_config import EXCLUDE_ESTIMATORS, PR_TESTING
from aeon.testing.utils.scenarios_getter import retrieve_scenarios
from aeon.utils.validation._dependencies import (
_check_python_version,
_check_soft_dependencies,
)

# list of soft dependencies used
# excludes estimators, only for soft dependencies used in non-estimator modules
SOFT_DEPENDENCIES = {
"aeon.benchmarking.evaluation": ["matplotlib"],
"aeon.benchmarking.experiments": ["tsfresh", "esig"],
"aeon.classification.deep_learning": ["tensorflow"],
"aeon.regression.deep_learning": ["tensorflow"],
"aeon.clustering.deep_learning": ["tensorflow"],
"aeon.networks": ["tensorflow"],
"aeon.visualisation": ["matplotlib"],
}

MODULES_TO_IGNORE = "aeon.testing.utils"


def _is_test(module):
module_parts = module.split(".")
return any(part in ("tests", "test") for part in module_parts)


def _is_ignored(module):
return any(module_to_ignore in module for module_to_ignore in MODULES_TO_IGNORE)


def _extract_dependency_from_error_msg(msg):
# We raise an user-friendly error if a soft dependency is missing in the
# `check_soft_dependencies` function. In the error message, the missing
# dependency is printed in single quotation marks, so we can use that here to
# extract and return the dependency name.
match = re.search(r"\'(.+?)\'", msg)
if match:
return match.group(1)
else:
raise ValueError("No dependency found in error msg.")

# collect all modules
modules = pkgutil.walk_packages(aeon.__path__, aeon.__name__ + ".")
modules = [x[1] for x in modules]

def test___extract_dependency_from_error_msg():
"""Test that _extract_dependency_from_error_msg works."""
msg = (
"No module named 'tensorflow'. "
"Tensorflow is a soft dependency. "
"To use tensorflow, please install it separately."
)
assert _extract_dependency_from_error_msg(msg) == "tensorflow"
with pytest.raises(ValueError, match="No dependency found in error msg"):
_extract_dependency_from_error_msg("No dependency.")
if PR_TESTING:
# exclude test modules
modules = [x for x in modules if not any(part == "tests" for part in x.split("."))]


# collect all modules
modules = pkgutil.walk_packages(path=["./aeon/"], prefix="aeon.")
modules = [x[1] for x in modules]
modules = [x for x in modules if not _is_test(x) and not _is_ignored(x)]
def test_module_crawl():
"""Test that we are crawling modules correctly."""
assert "aeon.classification" in modules
assert "aeon.classification.shapelet_based" in modules
assert "aeon.classification.base" in modules
assert "aeon.forecasting" in modules


@pytest.mark.parametrize("module", modules)
def test_module_softdeps(module):
"""Test soft dependency imports in aeon modules."""
# We try importing all modules and catch exceptions due to missing dependencies
def test_module_soft_deps(module):
"""Test soft dependency imports in aeon modules.
Imports all modules and catch exceptions due to missing dependencies.
"""
try:
import_module(module)
except ModuleNotFoundError as e:
error_msg = str(e)
dependency = "unknown"
match = re.search(r"\'(.+?)\'", str(e))
if match:
dependency = match.group(1)

# Check if appropriate exception with useful error message is raised as
# defined in the `_check_soft_dependencies` function
expected_error_msg = (
"is a soft dependency and not included in the base aeon installation"
)
# message is different for deep learning deps tensorflow, tensorflow-proba
error_msg_alt = "required for deep learning"

if expected_error_msg not in error_msg and error_msg_alt not in error_msg:
raise RuntimeError(
f"The module: {module} seems to require a soft "
f"dependency, but does not raise an appropriate error "
f"message when the soft dependency is missing. Please "
f"use our `_check_soft_dependencies` function to "
f"raise a more appropriate error message."
) from e

# If the error is raised in a module which does depend on a soft dependency,
# we ignore and skip it
dependencies = SOFT_DEPENDENCIES.get(module, [])
if any(dependency in error_msg for dependency in dependencies):
return None

# Otherwise we raise an error
dependency = _extract_dependency_from_error_msg(error_msg)
raise ModuleNotFoundError(
f"The module: {module} should not require any soft dependencies, "
f"but tried importing: '{dependency}'. Make sure soft dependencies are "
f"properly isolated."
) from e


# TODO test revamp: this can be part a greater check of all estimators probably, dont
# need to discover all estimators again here


def _has_soft_dep(est):
"""Return whether an estimator has soft dependencies."""
softdep = est.get_class_tag("python_dependencies", None)
@@ -133,13 +79,6 @@ def _coerce_list_of_str(obj):
return obj


def test__coerce_list_of_str():
"""Test that _coerce_list_of_str works."""
assert _coerce_list_of_str(None) == []
assert _coerce_list_of_str("a") == ["a"]
assert _coerce_list_of_str(["a"]) == ["a"]


def _get_soft_deps(est):
"""Return soft dependencies of an estimator, as list of str."""
softdeps = est.get_class_tag("python_dependencies", None)
@@ -164,14 +103,6 @@ def _is_in_env(modules):
return False


def soft_deps_installed(estimator):
"""Return whether soft dependencies of an estimator are installed in env."""
softdeps = _get_soft_deps(estimator)
if _is_in_env(softdeps):
return True
return False


# all estimators - exclude estimators on the global exclusion list
all_ests = all_estimators(return_names=False, exclude_estimators=EXCLUDE_ESTIMATORS)

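The rewritten test_softdeps.py no longer keeps a hand-maintained SOFT_DEPENDENCIES mapping; it crawls every aeon module with pkgutil, imports it, and relies on the error message raised by _check_soft_dependencies when something is missing. Below is a self-contained sketch of that crawl, assuming aeon is installed; the smoke-check loop at the end is illustrative only and not part of the commit.

import pkgutil
from importlib import import_module

import aeon

# Collect fully qualified module names under the installed aeon package,
# mirroring the walk_packages call added in this commit.
modules = [
    name for _, name, _ in pkgutil.walk_packages(aeon.__path__, aeon.__name__ + ".")
]

# Under PR testing the commit skips test modules to keep the job small.
modules = [m for m in modules if not any(part == "tests" for part in m.split("."))]

for module in modules[:5]:  # import a handful as an illustrative smoke check
    try:
        import_module(module)
    except ModuleNotFoundError as exc:
        # With no soft dependencies installed, this is where the friendly
        # _check_soft_dependencies error message should surface.
        print(f"{module}: {exc}")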
