diff --git a/pyiron_base/jobs/job/extension/server/generic.py b/pyiron_base/jobs/job/extension/server/generic.py index 2c119feba..49ae4e476 100644 --- a/pyiron_base/jobs/job/extension/server/generic.py +++ b/pyiron_base/jobs/job/extension/server/generic.py @@ -45,6 +45,13 @@ class Server( cores (int): number of cores run_mode (pyiron_base.server.runmode.Runmode): mode of the job ['modal', 'non_modal', 'queue', 'manual'] new_hdf (bool): create a new HDF5 file [True/False] - default=True + accept_crash (bool): ignore execution errors raised by external executables - default False + run_time (int): run time limit in seconds for the job to finish - required for HPC job schedulers + memory_limit (str): memory required + qid (int): Queuing system ID - ID received from the HPC job scheduler + additional_arguments (dict): Additional arguments for the HPC job scheduler + conda_environment_name (str): Name of the conda environment + conda_environment_path (str): Path to the conda environment Attributes: @@ -94,6 +101,13 @@ def __init__( gpus: Optional[int] = None, run_mode: str = "modal", new_hdf: bool = True, + accept_crash: bool = False, + run_time: Optional[int] = None, + memory_limit: Optional[str] = None, + qid: Optional[int] = None, + additional_arguments: Optional[dict] = None, + conda_environment_name: Optional[str] = None, + conda_environment_path: Optional[str] = None, ): super().__init__() self._data = ServerDataClass( @@ -104,14 +118,14 @@ def __init__( gpus=gpus, threads=threads, new_hdf=new_hdf, - accept_crash=False, - run_time=None, - memory_limit=None, - queue=None, - qid=None, - additional_arguments={}, - conda_environment_name=None, - conda_environment_path=None, + accept_crash=accept_crash, + run_time=run_time, + memory_limit=memory_limit, + queue=queue, + qid=qid, + additional_arguments=additional_arguments if additional_arguments is not None else {}, + conda_environment_name=conda_environment_name, + conda_environment_path=conda_environment_path, ) self._run_mode = Runmode() self._executor: Union[Executor, 
None] = None @@ -339,12 +353,12 @@ def run_time(self, new_run_time: int) -> None: self._data.run_time = new_run_time @property - def memory_limit(self) -> int: + def memory_limit(self) -> str: return self._data.memory_limit @memory_limit.setter @sentinel - def memory_limit(self, limit: int) -> None: + def memory_limit(self, limit: str) -> None: if state.queue_adapter is not None and self._data.queue is not None: memory_max = state.queue_adapter.check_queue_parameters( queue=self.queue, diff --git a/pyiron_base/project/decorator.py b/pyiron_base/project/decorator.py index 6e67974fa..40e9f6dcb 100644 --- a/pyiron_base/project/decorator.py +++ b/pyiron_base/project/decorator.py @@ -16,6 +16,13 @@ def pyiron_job( gpus: Optional[int] = None, run_mode: str = "modal", new_hdf: bool = True, + accept_crash: bool = False, + run_time: Optional[int] = None, + memory_limit: Optional[str] = None, + qid: Optional[int] = None, + additional_arguments: Optional[dict] = None, + conda_environment_name: Optional[str] = None, + conda_environment_path: Optional[str] = None, output_file_lst: list = [], output_key_lst: list = [], ): @@ -31,6 +38,13 @@ def pyiron_job( gpus (int): the number of gpus selected for the current simulation. run_mode (str): the run mode of the job ['modal', 'non_modal', 'queue', 'manual'] new_hdf (bool): defines whether a subjob should be stored in the same HDF5 file or in a new one. 
+ accept_crash (bool): ignore execution errors raised by external executables - default False + run_time (int): run time limit in seconds for the job to finish - required for HPC job schedulers + memory_limit (str): memory required + qid (int): Queuing system ID - ID received from the HPC job scheduler + additional_arguments (dict): Additional arguments for the HPC job scheduler + conda_environment_name (str): Name of the conda environment + conda_environment_path (str): Path to the conda environment output_file_lst (list): output_key_lst (list): @@ -100,6 +114,13 @@ def function( "gpus": gpus, "run_mode": run_mode, "new_hdf": new_hdf, + "accept_crash": accept_crash, + "run_time": run_time, + "memory_limit": memory_limit, + "qid": qid, + "additional_arguments": additional_arguments if additional_arguments is not None else {}, + "conda_environment_name": conda_environment_name, + "conda_environment_path": conda_environment_path, } return get_delayed_object( *args,