Merge pull request #343 from pyiron/minimal
mpi4py as optional dependency
jan-janssen authored May 30, 2024
2 parents 1e55fa3 + 7792f3d commit 38276a7
Showing 15 changed files with 145 additions and 34 deletions.
8 changes: 8 additions & 0 deletions .ci_support/environment-mini.yml
@@ -0,0 +1,8 @@
channels:
- conda-forge
dependencies:
- python
- numpy
- cloudpickle =3.0.0
- tqdm =4.66.4
- pyzmq =26.0.3
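
This minimal environment deliberately leaves out mpi4py, so the new workflow below exercises the optional-dependency code path. To reproduce it locally one could, for example, create the environment directly from this file (the environment name here is an arbitrary choice):

    conda env create -n pympipool-mini -f .ci_support/environment-mini.yml
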
32 changes: 32 additions & 0 deletions .github/workflows/minimal.yml
@@ -0,0 +1,32 @@
# This workflow is used to run the unittest of pyiron

name: Unittests-minimal

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - uses: conda-incubator/[email protected]
      with:
        python-version: "3.12"
        mamba-version: "*"
        channels: conda-forge
        miniforge-variant: Mambaforge
        channel-priority: strict
        auto-update-conda: true
        environment-file: .ci_support/environment-mini.yml
    - name: Test
      shell: bash -l {0}
      timeout-minutes: 5
      run: |
        pip install versioneer[toml]==0.29
        pip install . --no-deps --no-build-isolation
        cd tests
        python -m unittest discover .
2 changes: 1 addition & 1 deletion notebooks/examples.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions pympipool/__init__.py
@@ -40,7 +40,7 @@ class Executor:
points to the same address as localhost. Still MacOS >= 12 seems to disable
this look up for security reasons. So on MacOS it is required to set this
option to true
backend (str): Switch between the different backends "flux", "mpi" or "slurm". Alternatively, when "auto"
backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
is selected (the default) the available backend is determined automatically.
block_allocation (boolean): To accelerate the submission of a series of python functions with the same resource
requirements, pympipool supports block allocation. In this case all resources have
@@ -138,7 +138,7 @@ def __new__(
points to the same address as localhost. Still MacOS >= 12 seems to disable
this look up for security reasons. So on MacOS it is required to set this
option to true
backend (str): Switch between the different backends "flux", "mpi" or "slurm". Alternatively, when "auto"
backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
is selected (the default) the available backend is determined automatically.
block_allocation (boolean): To accelerate the submission of a series of python functions with the same
resource requirements, pympipool supports block allocation. In this case all
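
As a quick orientation for the renamed option, a usage sketch mirroring the serial tests further down in this diff (backend="local" replaces the former backend="mpi"; the remaining arguments keep their defaults):

    from pympipool import Executor

    def calc(i):
        return i

    # The serial, single-node backend is now selected with backend="local";
    # "flux" and "slurm" are unchanged and "auto" still picks one automatically.
    with Executor(max_cores=1, backend="local", hostname_localhost=True) as exe:
        fs = exe.submit(calc, 1)
        print(fs.result())  # -> 1
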
4 changes: 2 additions & 2 deletions pympipool/scheduler/__init__.py
@@ -77,7 +77,7 @@ def create_executor(
any computer should be able to resolve that their own hostname points to the same
address as localhost. Still MacOS >= 12 seems to disable this look up for security
reasons. So on MacOS it is required to set this option to true
backend (str): Switch between the different backends "flux", "mpi" or "slurm". Alternatively, when "auto"
backend (str): Switch between the different backends "flux", "local" or "slurm". Alternatively, when "auto"
is selected (the default) the available backend is determined automatically.
block_allocation (boolean): To accelerate the submission of a series of python functions with the same
resource requirements, pympipool supports block allocation. In this case all
@@ -145,7 +145,7 @@ def create_executor(
cwd=cwd,
hostname_localhost=hostname_localhost,
)
else: # backend="mpi"
else: # backend="local"
check_threads_per_core(threads_per_core=threads_per_core)
check_gpus_per_worker(gpus_per_worker=gpus_per_worker)
check_command_line_argument_lst(
7 changes: 6 additions & 1 deletion pympipool/shared/executorbase.py
@@ -2,6 +2,7 @@
Executor as FutureExecutor,
Future,
)
import importlib.util
import inspect
import os
import queue
@@ -422,8 +423,12 @@ def _get_backend_path(cores: int):
list[str]: List of strings containing the python executable path and the backend script to execute
"""
command_lst = [sys.executable]
if cores > 1:
if cores > 1 and importlib.util.find_spec("mpi4py") is not None:
command_lst += [_get_command_path(executable="mpiexec.py")]
elif cores > 1:
raise ImportError(
"mpi4py is required for parallel calculations. Please install mpi4py."
)
else:
command_lst += [_get_command_path(executable="serial.py")]
return command_lst
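
The new guard relies on importlib.util.find_spec, which returns a module spec when mpi4py is importable and None otherwise; a minimal standalone sketch of the same check:

    import importlib.util

    # Only select the MPI launcher when mpi4py can actually be imported;
    # otherwise fail early with the error introduced in the hunk above.
    if importlib.util.find_spec("mpi4py") is None:
        raise ImportError(
            "mpi4py is required for parallel calculations. Please install mpi4py."
        )
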
6 changes: 3 additions & 3 deletions pympipool/shared/inputcheck.py
@@ -70,9 +70,9 @@ def check_refresh_rate(refresh_rate: float):
def validate_backend(
backend: str, flux_installed: bool = False, slurm_installed: bool = False
) -> str:
if backend not in ["auto", "mpi", "slurm", "flux"]:
if backend not in ["auto", "flux", "local", "slurm"]:
raise ValueError(
'The currently implemented backends are ["flux", "mpi", "slurm"]. '
'The currently implemented backends are ["auto", "flux", "local", "slurm"]. '
'Alternatively, you can select "auto", the default option, to automatically determine the backend. But '
+ backend
+ " is not a valid choice."
@@ -90,7 +90,7 @@ def validate_backend(
elif backend == "slurm" or (backend == "auto" and slurm_installed):
return "slurm"
else:
return "mpi"
return "local"


def check_pmi(backend: str, pmi: Optional[str]):
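
A short usage sketch of the renamed fallback, with the keyword arguments from the signature above:

    from pympipool.shared.inputcheck import validate_backend

    # With neither flux nor SLURM detected, "auto" now resolves to "local"
    # (previously "mpi"); unknown backend names still raise a ValueError.
    print(validate_backend(backend="auto", flux_installed=False, slurm_installed=False))
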
6 changes: 4 additions & 2 deletions pyproject.toml
@@ -1,5 +1,5 @@
[build-system]
requires = ["cloudpickle", "mpi4py", "pyzmq", "setuptools", "tqdm", "versioneer[toml]==0.29"]
requires = ["cloudpickle", "pyzmq", "setuptools", "tqdm", "versioneer[toml]==0.29"]
build-backend = "setuptools.build_meta"

[project]
@@ -25,7 +25,6 @@ classifiers = [
]
dependencies = [
"cloudpickle==3.0.0",
"mpi4py==3.1.6",
"pyzmq==26.0.3",
"tqdm==4.66.4",
]
@@ -36,6 +35,9 @@ Homepage = "https://github.com/pyiron/pympipool"
Documentation = "https://pympipool.readthedocs.io"
Repository = "https://github.com/pyiron/pympipool"

[project.optional-dependencies]
mpi = ["mpi4py==3.1.6"]

[tool.setuptools.packages.find]
include = ["pympipool*"]

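
With mpi4py moved into an optional extra, a plain pip install now pulls in only cloudpickle, pyzmq and tqdm; MPI support is added on demand, for example:

    pip install pympipool[mpi]
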
4 changes: 2 additions & 2 deletions tests/benchmark/llh.py
@@ -50,7 +50,7 @@ def run_static(mean=0.1, sigma=1.1, runs=32):
sigma=1.1,
runs=32,
max_cores=4,
backend="mpi",
backend="local",
block_allocation=True,
)
elif run_mode == "pympipool":
@@ -62,7 +62,7 @@ def run_static(mean=0.1, sigma=1.1, runs=32):
sigma=1.1,
runs=32,
max_cores=4,
backend="mpi",
backend="local",
block_allocation=False,
)
elif run_mode == "flux":
2 changes: 1 addition & 1 deletion tests/test_dependencies_executor.py
@@ -12,7 +12,7 @@ def add_function(parameter_1, parameter_2):

class TestExecutorWithDependencies(unittest.TestCase):
def test_executor(self):
with Executor(max_cores=1, backend="mpi", hostname_localhost=True) as exe:
with Executor(max_cores=1, backend="local", hostname_localhost=True) as exe:
cloudpickle_register(ind=1)
future_1 = exe.submit(add_function, 1, parameter_2=2)
future_2 = exe.submit(add_function, 1, parameter_2=future_1)
17 changes: 12 additions & 5 deletions tests/test_executor_backend_mpi.py
@@ -1,9 +1,13 @@
import importlib.util
import unittest

from pympipool import Executor
from pympipool.shared.executorbase import cloudpickle_register


skip_mpi4py_test = importlib.util.find_spec("mpi4py") is None


def calc(i):
return i

@@ -19,7 +23,7 @@ def mpi_funct(i):
class TestExecutorBackend(unittest.TestCase):
def test_meta_executor_serial(self):
with Executor(
max_cores=2, hostname_localhost=True, backend="mpi", block_allocation=True
max_cores=2, hostname_localhost=True, backend="local", block_allocation=True
) as exe:
cloudpickle_register(ind=1)
fs_1 = exe.submit(calc, 1)
@@ -31,7 +35,7 @@ def test_meta_executor_serial(self):

def test_meta_executor_single(self):
with Executor(
max_cores=1, hostname_localhost=True, backend="mpi", block_allocation=True
max_cores=1, hostname_localhost=True, backend="local", block_allocation=True
) as exe:
cloudpickle_register(ind=1)
fs_1 = exe.submit(calc, 1)
@@ -41,12 +45,15 @@ def test_meta_executor_single(self):
self.assertTrue(fs_1.done())
self.assertTrue(fs_2.done())

@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
def test_meta_executor_parallel(self):
with Executor(
max_workers=2,
cores_per_worker=2,
hostname_localhost=True,
backend="mpi",
backend="local",
block_allocation=True,
) as exe:
cloudpickle_register(ind=1)
@@ -61,13 +68,13 @@ def test_errors(self):
cores_per_worker=1,
threads_per_core=2,
hostname_localhost=True,
backend="mpi",
backend="local",
)
with self.assertRaises(TypeError):
Executor(
max_cores=1,
cores_per_worker=1,
gpus_per_worker=1,
hostname_localhost=True,
backend="mpi",
backend="local",
)
26 changes: 12 additions & 14 deletions tests/test_executor_backend_mpi_noblock.py
@@ -8,22 +8,17 @@ def calc(i):
return i


def mpi_funct(i):
from mpi4py import MPI

size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
return i, size, rank


def resource_dict(resource_dict):
return resource_dict


class TestExecutorBackend(unittest.TestCase):
def test_meta_executor_serial(self):
with Executor(
max_cores=2, hostname_localhost=True, backend="mpi", block_allocation=False
max_cores=2,
hostname_localhost=True,
backend="local",
block_allocation=False,
) as exe:
cloudpickle_register(ind=1)
fs_1 = exe.submit(calc, 1)
@@ -35,7 +30,10 @@ def test_meta_executor_serial(self):

def test_meta_executor_single(self):
with Executor(
max_cores=1, hostname_localhost=True, backend="mpi", block_allocation=False
max_cores=1,
hostname_localhost=True,
backend="local",
block_allocation=False,
) as exe:
cloudpickle_register(ind=1)
fs_1 = exe.submit(calc, 1)
@@ -52,29 +50,29 @@ def test_errors(self):
cores_per_worker=1,
threads_per_core=2,
hostname_localhost=True,
backend="mpi",
backend="local",
)
with self.assertRaises(TypeError):
Executor(
max_cores=1,
cores_per_worker=1,
gpus_per_worker=1,
hostname_localhost=True,
backend="mpi",
backend="local",
)
with self.assertRaises(ValueError):
with Executor(
max_cores=1,
hostname_localhost=True,
backend="mpi",
backend="local",
block_allocation=False,
) as exe:
exe.submit(resource_dict, resource_dict={})
with self.assertRaises(ValueError):
with Executor(
max_cores=1,
hostname_localhost=True,
backend="mpi",
backend="local",
block_allocation=True,
) as exe:
exe.submit(resource_dict, resource_dict={})
22 changes: 22 additions & 0 deletions tests/test_mpi_executor.py
@@ -1,4 +1,5 @@
from concurrent.futures import CancelledError, Future
import importlib.util
from queue import Queue
from time import sleep
import unittest
@@ -14,6 +15,9 @@
)


skip_mpi4py_test = importlib.util.find_spec("mpi4py") is None


def calc(i):
return i

@@ -127,6 +131,9 @@ def test_pympiexecutor_errors(self):
)


@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
class TestPyMpiExecutorMPI(unittest.TestCase):
def test_pympiexecutor_one_worker_with_mpi(self):
with PyMPIExecutor(
@@ -164,6 +171,9 @@ def test_pympiexecutor_one_worker_with_mpi_echo(self):
self.assertEqual(output, [2, 2])


@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
class TestPyMpiStepExecutorMPI(unittest.TestCase):
def test_pympiexecutor_one_worker_with_mpi(self):
with PyMPIStepExecutor(
@@ -300,6 +310,9 @@ def test_executor_exception_future(self):
fs = p.submit(raise_error)
fs.result()

@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
def test_meta(self):
meta_data_exe_dict = {
"cores": 2,
@@ -339,6 +352,9 @@ def test_meta_step(self):
else:
self.assertEqual(str(exe.info[k]), v)

@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
def test_pool_multi_core(self):
with PyMPIExecutor(
max_workers=1, cores_per_worker=2, hostname_localhost=True
@@ -352,6 +368,9 @@ def test_pool_multi_core(self):
self.assertEqual(len(p), 0)
self.assertEqual(output.result(), [(2, 2, 0), (2, 2, 1)])

@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
def test_pool_multi_core_map(self):
with PyMPIExecutor(
max_workers=1, cores_per_worker=2, hostname_localhost=True
@@ -408,6 +427,9 @@ def test_execute_task(self):
self.assertEqual(f.result(), np.array(4))
q.join()

@unittest.skipIf(
skip_mpi4py_test, "mpi4py is not installed, so the mpi4py tests are skipped."
)
def test_execute_task_parallel(self):
f = Future()
q = Queue()