diff --git a/.github/workflows/build_upload_pypi_wheels.yml b/.github/workflows/build_upload_pypi_wheels.yml
index 37bc72676..d7c08a0da 100644
--- a/.github/workflows/build_upload_pypi_wheels.yml
+++ b/.github/workflows/build_upload_pypi_wheels.yml
@@ -10,7 +10,7 @@ jobs:
     strategy:
       matrix:
         os: [windows-latest, macos-13, ubuntu-latest]
-        python-version: ['3.8', '3.9', '3.10', '3.11']
+        python-version: ['3.10', '3.11']
         include:
           - os: windows-latest
             wheelname: win
@@ -19,16 +19,10 @@ jobs:
           - os: ubuntu-latest
             wheelname: manylinux
           # Build wheels against the lowest compatible Numpy version
-          - python-version: 3.8
-            manylinux-version-tag: cp38
-            numpy-version: 1.19.5
-          - python-version: 3.9
-            manylinux-version-tag: cp39
-            numpy-version: 1.19.5
-          - python-version: 3.10
+          - python-version: '3.10'
             manylinux-version-tag: cp310
             numpy-version: 1.21.3
-          - python-version: 3.11
+          - python-version: '3.11'
             manylinux-version-tag: cp311
             numpy-version: 1.23.2
       fail-fast: false
diff --git a/.github/workflows/create-landing-page.yml b/.github/workflows/create-landing-page.yml
index 8b11e18b2..30aab5019 100644
--- a/.github/workflows/create-landing-page.yml
+++ b/.github/workflows/create-landing-page.yml
@@ -17,7 +17,7 @@ jobs:
         ref: gh-pages
     - uses: actions/setup-python@v4
       with:
-        python-version: 3.8
+        python-version: '3.10'
     - name: Update pip and install dependencies
       run: |
        python -m pip install --upgrade pip
diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml
index 5a88312d4..ae2fc05ca 100644
--- a/.github/workflows/run_tests.yml
+++ b/.github/workflows/run_tests.yml
@@ -24,7 +24,7 @@ jobs:
     - uses: actions/checkout@v4
     - uses: conda-incubator/setup-miniconda@v3
       with:
-        python-version: 3.8
+        python-version: '3.10'
         channels: conda-forge,defaults
         channel-priority: true
     - name: Install llvm on Macos
@@ -36,10 +36,10 @@ jobs:
       run: |
        python -m pip install --upgrade pip
        python -m pip install -r tests_and_analysis/ci_requirements.txt
-    - name: Run tests, skip Python 3.9, 3.10 unless workflow dispatch
+    - name: Run tests, skip Python 3.11 unless workflow dispatch
      if: github.event_name != 'workflow_dispatch'
      env:
-        TOX_SKIP_ENV: '.*?(py39|py310).*?'
+        TOX_SKIP_ENV: '.*?(py311).*?'
      shell: bash -l {0}
      run: python -m tox
    - name: Run tests, workflow dispatch so test all Python versions
@@ -70,7 +70,7 @@ jobs:
     - uses: actions/checkout@v4
     - uses: conda-incubator/setup-miniconda@v3
       with:
-        python-version: 3.8
+        python-version: '3.10'
         channels: conda-forge,defaults
         channel-priority: true
     - name: Update pip and install dependencies
diff --git a/.github/workflows/test_release.yml b/.github/workflows/test_release.yml
index 8022a728f..5f22da5f0 100644
--- a/.github/workflows/test_release.yml
+++ b/.github/workflows/test_release.yml
@@ -10,14 +10,14 @@ jobs:
   test:
     strategy:
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ubuntu-latest, windows-latest, macos-latest, macos-13]
       fail-fast: false
     runs-on: ${{ matrix.os }}
     steps:
     - uses: actions/checkout@v3
     - uses: conda-incubator/setup-miniconda@v2
       with:
-        python-version: 3.8
+        python-version: '3.10'
         channels: conda-forge,defaults
         channel-priority: true
     - name: Install llvm on Macos
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 0022c976b..8ec0978d5 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -13,7 +13,7 @@ sphinx:
 build:
   os: ubuntu-22.04
   tools:
-    python: "3.8"
+    python: "3.10"
 
 # Optionally set the version of Python and requirements required to build your docs
 python:
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 8ddd5b313..aec40cc1c 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,4 +1,4 @@
-numpy>=1.14.5
+numpy>=1.21.3
 sphinx==5.3.0
 sphinx-argparse==0.3.2
 sphinx-autodoc-typehints==1.19.5
diff --git a/doc/source/cite.rst b/doc/source/cite.rst
index 5ffa1daad..bf5f784d8 100644
--- a/doc/source/cite.rst
+++ b/doc/source/cite.rst
@@ -12,7 +12,7 @@ or it can be read programatically as follows:
     import yaml
 
     import euphonic
-    from importlib_resources import files
+    from importlib.resources import files
 
     with open(files(euphonic) / 'CITATION.cff') as fp:
         citation_data = yaml.safe_load(fp)
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
index 5aebc05e3..4a9fccb1a 100644
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -5,7 +5,7 @@ Installation
 .. contents::
    :local:
 
-Euphonic has been tested on Python 3.8 - 3.10.
+Euphonic has been tested on Python 3.10 - 3.12.
 
 Pip
 ===
@@ -47,7 +47,7 @@ To create a "complete" installation in a new environment:
 
 .. code-block:: bash
 
-    conda create -n euphonic-forge -c conda-forge python=3.8 euphonic matplotlib-base pyyaml tqdm h5py
+    conda create -n euphonic-forge -c conda-forge python=3.10 euphonic matplotlib-base pyyaml tqdm h5py
 
 This creates an environment named "euphonic-forge", which can be
 entered with ``activate euphonic-forge`` and exited with
diff --git a/euphonic/__init__.py b/euphonic/__init__.py
index 580fa48fd..eec22412b 100644
--- a/euphonic/__init__.py
+++ b/euphonic/__init__.py
@@ -1,9 +1,10 @@
+from importlib.resources import files
+
 from . import _version
 __version__ = _version.get_versions()['version']
 
 import pint
 from pint import UnitRegistry
-from importlib_resources import files
 
 # Create ureg here so it is only created once
 ureg = UnitRegistry()
diff --git a/euphonic/readers/castep.py b/euphonic/readers/castep.py
index c7f373dbb..b170ab052 100644
--- a/euphonic/readers/castep.py
+++ b/euphonic/readers/castep.py
@@ -111,7 +111,7 @@ def read_phonon_dos_data(
     _, idx = np.unique(atom_type, return_index=True)
     unique_types = atom_type[np.sort(idx)]
     for i, species in enumerate(unique_types):
-        dos_dict[species] = dos_data[:, i + 2]/dos_conv
+        dos_dict[str(species)] = dos_data[:, i + 2]/dos_conv
 
     return data_dict
 
diff --git a/euphonic/styles/__init__.py b/euphonic/styles/__init__.py
index 5a568cd22..0997c9818 100644
--- a/euphonic/styles/__init__.py
+++ b/euphonic/styles/__init__.py
@@ -1,5 +1,5 @@
 """Matplotlib stylesheets for plot styling"""
-from importlib_resources import files
+from importlib.resources import files
 
 base_style = files(__package__) / "base.mplstyle"
 intensity_widget_style = files(__package__) / "intensity_widget.mplstyle"
diff --git a/euphonic/util.py b/euphonic/util.py
index bdbc93722..f1d3d0aeb 100644
--- a/euphonic/util.py
+++ b/euphonic/util.py
@@ -1,17 +1,16 @@
-from collections import OrderedDict
 from functools import reduce
+from importlib.resources import files
 import itertools
 import json
 import math
 import os.path
 import sys
-from typing import Dict, Sequence, Union, Tuple, Optional, List
+from typing import Sequence, Optional
 import warnings
 
 import numpy as np
 import seekpath
 from seekpath.hpkot import SymmetryDetectionError
-from importlib_resources import files
 from pint import UndefinedUnitError
 
 from euphonic import ureg, Quantity
@@ -52,7 +51,7 @@ def direction_changed(qpts: np.ndarray, tolerance: float = 5e-6
     return np.abs(np.abs(dot) - modq[1:]*modq[:-1]) > tolerance
 
 
-def is_gamma(qpt: np.ndarray) -> Union[bool, np.ndarray]:
+def is_gamma(qpt: np.ndarray) -> bool | np.ndarray:
     """
     Determines whether the given point(s) are gamma points
 
@@ -73,7 +72,7 @@ def is_gamma(qpt: np.ndarray) -> Union[bool, np.ndarray]:
     return isgamma
 
 
-def mp_grid(grid: Tuple[int, int, int]) -> np.ndarray:
+def mp_grid(grid: tuple[int, int, int]) -> np.ndarray:
     """
     Returns the q-points on a MxNxL Monkhorst-Pack grid specified
     by grid
@@ -101,8 +100,8 @@ def mp_grid(grid: Tuple[int, int, int]) -> np.ndarray:
     return np.column_stack((qh, qk, ql))
 
 
-def get_all_origins(max_xyz: Tuple[int, int, int],
-                    min_xyz: Tuple[int, int, int] = (0, 0, 0),
+def get_all_origins(max_xyz: tuple[int, int, int],
+                    min_xyz: tuple[int, int, int] = (0, 0, 0),
                     step: int = 1) -> np.ndarray:
     """
     Given the max/min number of cells in each direction, get a list of
@@ -133,10 +132,10 @@ def get_all_origins(max_xyz: Tuple[int, int, int],
 
 
 def get_qpoint_labels(qpts: np.ndarray,
-                      cell: Optional[Tuple[List[List[float]],
-                                           List[List[float]],
-                                           List[int]]] = None
-                      ) -> List[Tuple[int, str]]:
+                      cell: Optional[tuple[list[list[float]],
+                                           list[list[float]],
+                                           list[int]]] = None
+                      ) -> list[tuple[int, str]]:
     """
     Gets q-point labels (e.g. GAMMA, X, L) for the q-points at which
     the path through reciprocal space changes direction, or where a point
@@ -170,7 +169,7 @@
 def get_reference_data(collection: str = 'Sears1992',
                        physical_property: str = 'coherent_scattering_length'
-                       ) -> Dict[str, Quantity]:
+                       ) -> dict[str, Quantity]:
     """
     Get physical data as a dict of (possibly-complex) floats from
     reference data.
 
@@ -204,7 +203,7 @@ def get_reference_data(collection: str = 'Sears1992',
 
     Returns
     -------
-    Dict[str, Quantity]
+    dict[str, Quantity]
         Requested data as a dict with string keys and (possibly-complex)
         float Quantity values. String or None items of the original data
         file will be omitted.
@@ -304,7 +303,7 @@ def convert_fc_phases(force_constants: np.ndarray, atom_r: np.ndarray,
                       sc_atom_r: np.ndarray, uc_to_sc_atom_idx: np.ndarray,
                       sc_to_uc_atom_idx: np.ndarray, sc_matrix: np.ndarray,
                       cell_origins_tol: float = 1e-5
-                      ) -> Tuple[np.ndarray, np.ndarray]:
+                      ) -> tuple[np.ndarray, np.ndarray]:
     """
     Convert from a force constants matrix which uses the atom
     coordinates as r in the e^-iq.r phase (Phonopy-like), to a
@@ -390,7 +389,7 @@ def convert_fc_phases(force_constants: np.ndarray, atom_r: np.ndarray,
     # atom 0, so the same cell origins can be used for all atoms
     cell_origins_map = np.zeros((n_atoms_sc), dtype=np.int32)
     # Get origins of adjacent supercells in prim cell frac coords
-    sc_origins = get_all_origins((2,2,2), min_xyz=(-1,-1,-1))
+    sc_origins = get_all_origins((2, 2, 2), min_xyz=(-1, -1, -1))
     sc_origins_pcell = np.einsum('ij,jk->ik', sc_origins, sc_matrix)
     for i in range(n_atoms_sc):
         co_idx = np.where(
@@ -429,7 +428,7 @@ def convert_fc_phases(force_constants: np.ndarray, atom_r: np.ndarray,
         sc_relative_idx = _get_supercell_relative_idx(cell_origins, sc_matrix)
         fc_converted[i, sc_relative_idx[cell_idx]] = fc_tmp
 
-    fc_converted = np.reshape(np.transpose(
+    fc_converted = np.reshape(np.transpose(
         fc_converted, axes=[1, 0, 3, 2, 4]),
         (n_cells, 3*n_atoms_uc, 3*n_atoms_uc))
     return fc_converted, cell_origins
@@ -444,16 +443,15 @@ def _cell_vectors_to_volume(cell_vectors: Quantity) -> Quantity:
 
 
 def _get_unique_elems_and_idx(
-        all_elems: Sequence[Tuple[Union[int, str], ...]]
-        ) -> 'OrderedDict[Tuple[Union[int, str], ...], np.ndarray]':
+        all_elems: Sequence[tuple[int | str, ...]]
+        ) -> dict[tuple[int | str, ...], np.ndarray]:
     """
     Returns an ordered dictionary mapping the unique sequences of
     elements to their indices
     """
-    # Abuse OrderedDict to get ordered set
-    unique_elems = OrderedDict(
-        zip(all_elems, itertools.cycle([None]))).keys()
-    return OrderedDict((
+    # Abuse dict keys to get an "ordered set" of elems for iteration
+    unique_elems = dict(zip(all_elems, itertools.cycle([None]))).keys()
+    return dict((
         elem,
         np.asarray([i for i, other_elem in enumerate(all_elems)
                     if elem == other_elem])
@@ -461,7 +459,7 @@ def _get_unique_elems_and_idx(
 
 
 def _calc_abscissa(reciprocal_cell: Quantity, qpts: np.ndarray
-                   ) -> Quantity:
+                   ) -> Quantity:
     """
     Calculates the distance between q-points (e.g. to use as a plot
     x-coordinate)
@@ -519,10 +517,10 @@ def _calc_abscissa(reciprocal_cell: Quantity, qpts: np.ndarray
 
 
 def _recip_space_labels(qpts: np.ndarray,
-                        cell: Optional[Tuple[List[List[float]],
-                                             List[List[float]],
-                                             List[int]]]
-                        ) -> Tuple[np.ndarray, np.ndarray]:
+                        cell: Optional[tuple[list[list[float]],
+                                             list[list[float]],
+                                             list[int]]]
+                        ) -> tuple[np.ndarray, np.ndarray]:
     """
     Gets q-points point labels (e.g. GAMMA, X, L) for the q-points at
     which the path through reciprocal space changes direction or where a
@@ -592,7 +590,7 @@ def _recip_space_labels(qpts: np.ndarray,
     return labels, qpts_with_labels
 
 
-def _generic_qpt_labels() -> Dict[str, Tuple[float, float, float]]:
+def _generic_qpt_labels() -> dict[str, tuple[float, float, float]]:
     """
     Returns a dictionary relating fractional q-point label strings to
     their coordinates e.g. '1/4 1/2 1/4' = [0.25, 0.5, 0.25]. Used for
@@ -612,7 +610,7 @@ def _generic_qpt_labels() -> Dict[str, Tuple[float, float, float]]:
 
 
 def _get_qpt_label(qpt: np.ndarray,
-                   point_labels: Dict[str, Tuple[float, float, float]]
+                   point_labels: dict[str, tuple[float, float, float]]
                    ) -> str:
     """
     Gets a label for a particular q-point, based on the high symmetry
diff --git a/release_tox.ini b/release_tox.ini
index e3b71c556..1a75af0be 100644
--- a/release_tox.ini
+++ b/release_tox.ini
@@ -2,7 +2,7 @@
 # Use conda to set up the python environments to run in
 requires = tox-conda
 # The python environments to run the tests in
-envlist = pypi-py38-min,conda-py38-old-np,{pypi,conda}-{py38,py39,py310,py311},pypisource-{py38,py311}
+envlist = pypi-py310-min,conda-py310-old-np,{pypi,conda}-{py310,py311,py312},pypisource-{py310,py312}
 # Skip the execution of setup.py as we do it with the correct version in commands_pre below
 skipsdist = True
 
@@ -11,7 +11,7 @@ changedir = tests_and_analysis/test
 test_command = python run_tests.py --report
 
 # Test PyPI source distribution
-[testenv:pypisource-{py38,py311}]
+[testenv:pypisource-{py310,py312}]
 install_command = python -m pip install {opts} {packages}
 deps =
     numpy
@@ -24,7 +24,7 @@ commands_pre =
 commands = {[testenv]test_command}
 
 
-[testenv:pypi-{py38,py39,py310,py311}]
+[testenv:pypi-{py310,py311,py312}]
 install_command = python -m pip install {opts} {packages}
 deps =
     numpy
@@ -36,10 +36,10 @@ commands_pre =
     --only-binary 'euphonic'
 commands = {[testenv]test_command}
 
-[testenv:pypi-py38-min]
+[testenv:pypi-py310-min]
 install_command = python -m pip install --force-reinstall {opts} {packages}
 deps =
-    numpy==1.19.5
+    numpy==1.21.3
 commands_pre =
     python -m pip install --force-reinstall \
     -r{toxinidir}/tests_and_analysis/minimum_euphonic_requirements.txt
@@ -50,7 +50,7 @@ commands_pre =
     --only-binary 'euphonic'
 commands = {[testenv]test_command}
 
-[testenv:conda-{py38,py39,py310,py311}]
+[testenv:conda-{py310,py311,py312}]
 whitelist_externals = conda
 install_command = conda install {packages}
 conda_channels =
@@ -65,7 +65,7 @@ commands = {[testenv]test_command} -m "not brille"
 
 # Test against a version of Numpy less than the latest for Conda
 # See https://github.com/conda-forge/euphonic-feedstock/pull/20
-[testenv:conda-py38-old-np]
+[testenv:conda-py310-old-np]
 whitelist_externals = conda
 install_command = conda install {packages}
 conda_channels =
@@ -74,7 +74,7 @@ conda_channels =
 conda_deps =
     --file={toxinidir}/tests_and_analysis/tox_requirements.txt
 commands_pre =
-    conda install numpy=1.20
+    conda install numpy=1.22
     conda install -c conda-forge euphonic={env:EUPHONIC_VERSION} matplotlib-base pyyaml h5py
 # Brille not available on conda
 commands = {[testenv]test_command} -m "not brille"
diff --git a/setup.py b/setup.py
index 38ef6339c..0b2cf2df1 100644
--- a/setup.py
+++ b/setup.py
@@ -139,16 +139,15 @@ def run_setup():
         include_package_data=True,
         install_requires=[
             'packaging',
-            'scipy>=1.10',  # requires numpy >= 1.19.5
+            'scipy>=1.10',  # requires numpy >= 1.19.5; py3.10 requires 1.21.3
             'seekpath>=1.1.0',
             'spglib>=1.9.4',
-            'pint>=0.19',
-            'importlib_resources>=1.3.0',  # equivalent to Python 3.9
+            'pint>=0.22',
             'threadpoolctl>=1.0.0'
         ],
         extras_require={
-            'matplotlib': ['matplotlib>=3.2.0'],
-            'phonopy_reader': ['h5py>=2.10.0', 'PyYAML>=3.13'],
+            'matplotlib': ['matplotlib>=3.8.0'],
+            'phonopy_reader': ['h5py>=3.6.0', 'PyYAML>=6.0'],
             'brille': ['brille>=0.7.0']
         },
         entry_points={'console_scripts': [
diff --git a/tests_and_analysis/minimum_euphonic_requirements.txt b/tests_and_analysis/minimum_euphonic_requirements.txt
index c0f56715f..d7ba1efd8 100644
--- a/tests_and_analysis/minimum_euphonic_requirements.txt
+++ b/tests_and_analysis/minimum_euphonic_requirements.txt
@@ -1,10 +1,9 @@
-numpy==1.19.5
+numpy==1.21.3
 scipy==1.10.0
 spglib==1.9.4.2
 seekpath==1.1.0
-pint==0.19.0
-importlib_resources==1.3.0
-matplotlib==3.2.0
-h5py==2.10.0
-PyYAML==3.13
+pint==0.22
+matplotlib==3.8
+h5py==3.6
+PyYAML==6.0
 threadpoolctl==1.0.0
diff --git a/tests_and_analysis/test/euphonic_test/test_install.py b/tests_and_analysis/test/euphonic_test/test_install.py
index 9bd723402..486cdad50 100644
--- a/tests_and_analysis/test/euphonic_test/test_install.py
+++ b/tests_and_analysis/test/euphonic_test/test_install.py
@@ -1,4 +1,4 @@
-from importlib_resources import files
+from importlib.resources import files
 
 import pytest
 
diff --git a/tests_and_analysis/test/run_tests.py b/tests_and_analysis/test/run_tests.py
index c52c9b881..aff3afc5d 100644
--- a/tests_and_analysis/test/run_tests.py
+++ b/tests_and_analysis/test/run_tests.py
@@ -2,7 +2,6 @@
 import sys
 import os
 import time
-from typing import Tuple, List, Union
 
 import pytest
 import coverage
@@ -14,7 +13,7 @@ def main():
     (do_report_coverage, do_report_tests, tests,
      markers_to_run) = _get_parsed_args(test_dir)
 
-    pytest_options: List[str] = _build_pytest_options(
+    pytest_options: list[str] = _build_pytest_options(
         reports_dir, do_report_tests, tests, markers_to_run)
 
     test_exit_code: int = run_tests(
@@ -24,7 +23,7 @@ def main():
     sys.exit(test_exit_code)
 
 
-def _get_test_and_reports_dir() -> Tuple[str, str]:
+def _get_test_and_reports_dir() -> tuple[str, str]:
     """
     Get the directory that holds the tests and the directory to write
     reports to. If the directory to write reports to isn't present, it
@@ -44,7 +43,7 @@ def _get_test_and_reports_dir() -> Tuple[str, str]:
     return test_dir, reports_dir
 
 
-def _get_parsed_args(test_dir: str) -> Tuple[bool, bool, str, str]:
+def _get_parsed_args(test_dir: str) -> tuple[bool, bool, str, str]:
     """
     Get the arguments parsed to this script and return some formatted
     variables.
@@ -81,7 +80,7 @@ def _get_parsed_args(test_dir: str) -> Tuple[bool, bool, str, str]:
 
 
 def _build_pytest_options(reports_dir: str, do_report_tests: bool,
-                          tests: str, markers: str) -> List[str]:
+                          tests: str, markers: str) -> list[str]:
     """
     Build the options for pytest to use.
 
@@ -93,16 +92,16 @@ def _build_pytest_options(reports_dir: str, do_report_tests: bool,
         Whether to write the test reports to junit xml or not.
     tests : str
         The tests to run e.g. script_tests or test_bands_data.py
-    markers : Union[str, None]
+    markers : str|None
         The markers for pytest tests to run e.g. "unit" or
         "unit or integration"
 
     Returns
     -------
-    List[str]
+    list[str]
         A list of options to run pytest with.
     """
-    options: List[str] = [tests]
+    options: list[str] = [tests]
     # Add reporting of test results
     if do_report_tests:
         # We may have multiple reports, so get a unique filename
@@ -116,14 +115,14 @@ def _build_pytest_options(reports_dir: str, do_report_tests: bool,
     return options
 
 
-def run_tests(pytest_options: List[str], do_report_coverage: bool,
+def run_tests(pytest_options: list[str], do_report_coverage: bool,
               reports_dir: str, test_dir: str) -> int:
     """
     Run the tests and record coverage if selected.
 
     Parameters
     ----------
-    pytest_options : List[str]
+    pytest_options : list[str]
         The options to pass to pytest
     do_report_coverage : bool
         If true report coverage to coverage*.xml
@@ -143,7 +142,7 @@ def run_tests(pytest_options: List[str], do_report_coverage: bool,
     pytest_options = ['--import-mode=append'] + pytest_options
 
     # Start recording coverage if requested
-    cov: Union[coverage.Coverage, None] = None
+    cov: coverage.Coverage | None = None
     if do_report_coverage:
         coveragerc_filepath: str = os.path.join(test_dir, ".coveragerc")
         cov = coverage.Coverage(config_file=coveragerc_filepath)
diff --git a/tox.ini b/tox.ini
index 0132e7eed..4b36e2afc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
 [tox]
 requires = tox-conda
 # The python environments to run the tests in
-envlist = py38,py39,py310,py311,py38-{base,matplotlib,phonopy_reader,brille,all},py38-minrequirements-linux
+envlist = py310,py311,py312,py310-{base,matplotlib,phonopy_reader,brille,all},py310-minrequirements-linux
 # Skip the execution of setup.py as we do it with the correct arg in commands_pre below
 skipsdist = True
 
@@ -9,7 +9,7 @@ skipsdist = True
 changedir = tests_and_analysis/test
 test_command = python run_tests.py --report
 
-[testenv:{py38,py39,py310,py311}]
+[testenv:{py310,py311,py312}]
 install_command =
     python -m pip install \
     --force-reinstall \
@@ -29,7 +29,7 @@ commands = {[testenv]test_command} --cov
 
 
 # Test with no extras
-[testenv:py38-base]
+[testenv:py310-base]
 install_command = {[testenv:py310]install_command}
 deps = {[testenv:py310]deps}
 commands_pre =
@@ -40,7 +40,7 @@ commands_pre =
 commands = {[testenv]test_command} --cov -m "not (phonopy_reader or matplotlib or brille)"
 
 # Test with matplotlib extra only
-[testenv:py38-matplotlib]
+[testenv:py310-matplotlib]
 install_command = {[testenv:py310]install_command}
 deps = {[testenv:py310]deps}
 commands_pre =
@@ -51,7 +51,7 @@ commands_pre =
 commands = {[testenv]test_command} --cov -m "matplotlib and not multiple_extras"
 
 # Test with phonopy_reader extra only
-[testenv:py38-phonopy_reader]
+[testenv:py310-phonopy_reader]
 install_command = {[testenv:py310]install_command}
 deps = {[testenv:py310]deps}
 commands_pre =
@@ -62,7 +62,7 @@ commands_pre =
 commands = {[testenv]test_command} --cov -m "phonopy_reader and not multiple_extras"
 
 # Test with brille extra only
-[testenv:py38-brille]
+[testenv:py310-brille]
 install_command = {[testenv:py310]install_command}
 deps = {[testenv:py310]deps}
 commands_pre =
@@ -73,7 +73,7 @@ commands_pre =
 commands = {[testenv]test_command} --cov -m "brille and not multiple_extras"
 
 # Run remaining tests that require multiple extras
-[testenv:py38-all]
+[testenv:py310-all]
 install_command = {[testenv:py310]install_command}
 deps = {[testenv:py310]deps}
 commands_pre =
@@ -84,14 +84,14 @@ commands_pre =
 commands = {[testenv]test_command} --cov -m multiple_extras
 
 
-[testenv:py38-minrequirements-linux]
+[testenv:py310-minrequirements-linux]
 whitelist_externals = rm
 install_command =
     python -m pip install --force-reinstall {opts} {packages}
 platform =
     linux: linux
 deps =
-    numpy==1.19.5
+    numpy==1.21.3
 commands_pre =
     python -m pip install --force-reinstall \
    -r{toxinidir}/tests_and_analysis/minimum_euphonic_requirements.txt
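Note, for context only and not part of the patch above: the source changes replace the third-party importlib_resources backport with the standard library's importlib.resources (its files() API has been in the stdlib since Python 3.9, as the removed setup.py comment notes), and move the type hints to builtin generics and PEP 604 unions such as bool | np.ndarray, which need Python >= 3.10 when evaluated at runtime. A minimal, self-contained sketch of both idioms follows; the helper name and the fallback handling are illustrative, only the euphonic / CITATION.cff pairing is taken from the diff:

    from importlib.resources import files


    def read_packaged_text(package: str, resource: str) -> str | None:
        """Return the text of a data file bundled with an installed package,
        or None if that package does not ship the file.

        'str | None' (PEP 604) needs Python 3.10+ in runtime annotations;
        builtin generics such as list[str] need Python 3.9+.
        """
        # files() returns a Traversable, so this also works for zipped installs
        candidate = files(package) / resource
        if not candidate.is_file():
            return None
        return candidate.read_text()


    # Illustrative usage, mirroring how doc/source/cite.rst reads CITATION.cff
    print(read_packaged_text('euphonic', 'CITATION.cff'))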