diff --git a/dask_jobqueue/slurm.py b/dask_jobqueue/slurm.py
index d31e5562..dba37867 100644
--- a/dask_jobqueue/slurm.py
+++ b/dask_jobqueue/slurm.py
@@ -205,7 +205,7 @@ def __init__(self, *args, scheduler_file="scheduler-{job_id}.json", **kwargs):
         if not scheduler_file:
             raise RuntimeError(
                 "scheduler_file must be specified in either the "
-                "scheduler_options or as keyword argument to SlurmRunner."
+                "scheduler_options or as a keyword argument to SLURMRunner."
             )
 
         # Encourage filename uniqueness by inserting the job ID
diff --git a/docs/source/runners-overview.rst b/docs/source/runners-overview.rst
index a542399a..68a72468 100644
--- a/docs/source/runners-overview.rst
+++ b/docs/source/runners-overview.rst
@@ -11,13 +11,13 @@ variable to decide what role reach process should play and uses the shared files
 
     # myscript.py
     from dask.distributed import Client
-    from dask_jobqueue.slurm import SlurmRunner
+    from dask_jobqueue.slurm import SLURMRunner
 
-    # When entering the SlurmRunner context manager processes will decide if they should be
-    # the client, schdeduler or a worker.
+    # When entering the SLURMRunner context manager processes will decide if they should be
+    # the client, scheduler or a worker.
     # Only process ID 1 executes the contents of the context manager.
     # All other processes start the Dask components and then block here forever.
-    with SlurmRunner(scheduler_file="/path/to/shared/filesystem/scheduler-{job_id}.json") as runner:
+    with SLURMRunner(scheduler_file="/path/to/shared/filesystem/scheduler-{job_id}.json") as runner:
 
        # The runner object contains the scheduler address info and can be used to construct a client.
        with Client(runner) as client:
@@ -29,4 +29,4 @@ variable to decide what role reach process should play and uses the shared files
         assert client.submit(lambda x: x + 1, 10).result() == 11
         assert client.submit(lambda x: x + 1, 20, workers=2).result() == 21
 
-    # When process ID 1 exits the SlurmRunner context manager it sends a graceful shutdown to the Dask processes.
+    # When process ID 1 exits the SLURMRunner context manager it sends a graceful shutdown to the Dask processes.
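
For context when reviewing the docs change above: myscript.py is meant to be launched once per SLURM task, and per the comments in the example, process ID 1 executes the body of the context manager while the other processes start the scheduler and workers. A minimal submission-script sketch follows; it is not part of this diff, and the job name, task count, and resource flags are illustrative assumptions (a real job needs at least three tasks: one scheduler, one client, and one or more workers):

    #!/bin/bash
    #SBATCH --job-name=dask-runner-example   # illustrative name, not from this PR
    #SBATCH --ntasks=12                      # assumed count; must be >= 3 so there
                                             # is a scheduler, a client, and workers

    # srun inherits --ntasks and launches one copy of the script per task;
    # each copy then decides its Dask role inside the SLURMRunner context.
    srun python myscript.py

Submitting this with sbatch would exercise both renamed code paths in this patch: the import/usage in the docs example and the RuntimeError message in slurm.py if scheduler_file were omitted.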