From 926cbb52d68bb1517a00411922de10866bdfe851 Mon Sep 17 00:00:00 2001
From: Francesco Nattino <49899980+fnattino@users.noreply.github.com>
Date: Fri, 9 Feb 2024 16:10:45 +0100
Subject: [PATCH] Python executable from config file (#623)

* Consider Python from config file before setting default

* add entry to base config file

* fix tests with custom config

* black formatting
---
 dask_jobqueue/core.py                | 10 +++++++---
 dask_jobqueue/jobqueue.yaml          |  8 ++++++++
 dask_jobqueue/tests/test_htcondor.py |  1 +
 dask_jobqueue/tests/test_lsf.py      | 13 +++++++------
 dask_jobqueue/tests/test_oar.py      | 13 +++++++------
 dask_jobqueue/tests/test_pbs.py      | 13 +++++++------
 dask_jobqueue/tests/test_sge.py      |  1 +
 dask_jobqueue/tests/test_slurm.py    | 13 +++++++------
 8 files changed, 45 insertions(+), 27 deletions(-)

diff --git a/dask_jobqueue/core.py b/dask_jobqueue/core.py
index b265ac87..62af14aa 100644
--- a/dask_jobqueue/core.py
+++ b/dask_jobqueue/core.py
@@ -177,7 +177,7 @@ def __init__(
         job_directives_skip=None,
         log_directory=None,
         shebang=None,
-        python=sys.executable,
+        python=None,
         job_name=None,
         config_name=None,
     ):
@@ -206,6 +206,11 @@ def __init__(
                 )
             )
 
+        if python is None:
+            python = dask.config.get("jobqueue.%s.python" % self.config_name)
+        if python is None:
+            python = sys.executable
+
         if job_name is None:
             job_name = dask.config.get("jobqueue.%s.name" % self.config_name)
         if processes is None:
@@ -339,8 +344,7 @@ def __init__(
 
         # dask-worker command line build
         dask_worker_command = "%(python)s -m %(worker_command)s" % dict(
-            python=python,
-            worker_command=worker_command
+            python=python, worker_command=worker_command
         )
 
         command_args = [dask_worker_command, self.scheduler]
diff --git a/dask_jobqueue/jobqueue.yaml b/dask_jobqueue/jobqueue.yaml
index 3bcb8c58..f9424158 100644
--- a/dask_jobqueue/jobqueue.yaml
+++ b/dask_jobqueue/jobqueue.yaml
@@ -7,6 +7,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -40,6 +41,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -72,6 +74,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -104,6 +107,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -137,6 +141,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -169,6 +174,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -204,6 +210,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
@@ -234,6 +241,7 @@ jobqueue:
     memory: null                 # Total amount of memory per job
     processes: null              # Number of Python processes per job
+    python: null                 # Python executable
 
     interface: null              # Network interface to use like eth0 or ib0
     death-timeout: 60            # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null        # Location of fast local storage like /scratch or $TMPDIR
diff --git a/dask_jobqueue/tests/test_htcondor.py b/dask_jobqueue/tests/test_htcondor.py
index ea57e004..4ee455af 100644
--- a/dask_jobqueue/tests/test_htcondor.py
+++ b/dask_jobqueue/tests/test_htcondor.py
@@ -148,6 +148,7 @@ def test_config_name_htcondor_takes_custom_config():
         "shebang": "#!/usr/bin/env condor_submit",
         "local-directory": "/tmp",
         "shared-temp-directory": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.htcondor-config-name": conf}):
diff --git a/dask_jobqueue/tests/test_lsf.py b/dask_jobqueue/tests/test_lsf.py
index 87e2cd67..96fdc601 100644
--- a/dask_jobqueue/tests/test_lsf.py
+++ b/dask_jobqueue/tests/test_lsf.py
@@ -95,9 +95,9 @@ def test_job_script():
             in job_script
         )
         formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-        assert ("--nthreads 2" in job_script)
-        assert ("--nworkers 4" in job_script)
-        assert (f"--memory-limit {formatted_bytes}" in job_script)
+        assert "--nthreads 2" in job_script
+        assert "--nworkers 4" in job_script
+        assert f"--memory-limit {formatted_bytes}" in job_script
 
     with LSFCluster(
         queue="general",
@@ -123,9 +123,9 @@ def test_job_script():
             in job_script
         )
         formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-        assert ("--nthreads 2" in job_script)
-        assert ("--nworkers 4" in job_script)
-        assert (f"--memory-limit {formatted_bytes}" in job_script)
+        assert "--nthreads 2" in job_script
+        assert "--nworkers 4" in job_script
+        assert f"--memory-limit {formatted_bytes}" in job_script
 
     with LSFCluster(
         walltime="1:00",
@@ -321,6 +321,7 @@ def test_config_name_lsf_takes_custom_config():
         "log-directory": None,
         "shebang": "#!/usr/bin/env bash",
         "use-stdin": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.lsf-config-name": conf}):
diff --git a/dask_jobqueue/tests/test_oar.py b/dask_jobqueue/tests/test_oar.py
index c9384885..894ffdd4 100644
--- a/dask_jobqueue/tests/test_oar.py
+++ b/dask_jobqueue/tests/test_oar.py
@@ -82,9 +82,9 @@ def test_job_script():
             in job_script
         )
         formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-        assert ("--nthreads 2" in job_script)
-        assert ("--nworkers 4" in job_script)
-        assert (f"--memory-limit {formatted_bytes}" in job_script)
(f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert f"--memory-limit {formatted_bytes}" in job_script with OARCluster( walltime="00:02:00", @@ -115,9 +115,9 @@ def test_job_script(): in job_script ) formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "") - assert ("--nthreads 2" in job_script) - assert ("--nworkers 4" in job_script) - assert (f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert f"--memory-limit {formatted_bytes}" in job_script def test_config_name_oar_takes_custom_config(): @@ -147,6 +147,7 @@ def test_config_name_oar_takes_custom_config(): "job-mem": None, "resource-spec": None, "memory-per-core-property-name": "memcore", + "python": None, } with dask.config.set({"jobqueue.oar-config-name": conf}): diff --git a/dask_jobqueue/tests/test_pbs.py b/dask_jobqueue/tests/test_pbs.py index 8ec3a019..89db0f68 100644 --- a/dask_jobqueue/tests/test_pbs.py +++ b/dask_jobqueue/tests/test_pbs.py @@ -71,9 +71,9 @@ def test_job_script(Cluster): in job_script ) formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "") - assert ("--nthreads 2" in job_script) - assert ("--nworkers 4" in job_script) - assert (f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert f"--memory-limit {formatted_bytes}" in job_script with Cluster( queue="regular", @@ -96,9 +96,9 @@ def test_job_script(Cluster): in job_script ) formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "") - assert ("--nthreads 2" in job_script) - assert ("--nworkers 4" in job_script) - assert (f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert f"--memory-limit {formatted_bytes}" in job_script @pytest.mark.env("pbs") @@ -360,6 +360,7 @@ def test_config_name_pbs_takes_custom_config(): "job-cpu": None, "job-mem": None, "resource-spec": None, + "python": None, } with dask.config.set({"jobqueue.pbs-config-name": conf}): diff --git a/dask_jobqueue/tests/test_sge.py b/dask_jobqueue/tests/test_sge.py index ab761223..9e347ed9 100644 --- a/dask_jobqueue/tests/test_sge.py +++ b/dask_jobqueue/tests/test_sge.py @@ -66,6 +66,7 @@ def test_config_name_sge_takes_custom_config(): "job-cpu": None, "job-mem": None, "resource-spec": None, + "python": None, } with dask.config.set({"jobqueue.sge-config-name": conf}): diff --git a/dask_jobqueue/tests/test_slurm.py b/dask_jobqueue/tests/test_slurm.py index 51dbbfb2..075a0eae 100644 --- a/dask_jobqueue/tests/test_slurm.py +++ b/dask_jobqueue/tests/test_slurm.py @@ -73,9 +73,9 @@ def test_job_script(): in job_script ) formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "") - assert ("--nthreads 2" in job_script) - assert ("--nworkers 4" in job_script) - assert (f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert f"--memory-limit {formatted_bytes}" in job_script with SLURMCluster( walltime="00:02:00", @@ -107,9 +107,9 @@ def test_job_script(): in job_script ) formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "") - assert ("--nthreads 2" in job_script) - assert ("--nworkers 4" in job_script) - assert (f"--memory-limit {formatted_bytes}" in job_script) + assert "--nthreads 2" in job_script + assert "--nworkers 4" in job_script + assert 
f"--memory-limit {formatted_bytes}" in job_script @pytest.mark.env("slurm") @@ -196,6 +196,7 @@ def test_config_name_slurm_takes_custom_config(): "shebang": "#!/usr/bin/env bash", "job-cpu": None, "job-mem": None, + "python": None, } with dask.config.set({"jobqueue.slurm-config-name": conf}):