@@ -39,8 +39,6 @@ class SlurmClusterExecutor(BaseExecutor):
3939 - threads_per_core (int): number of OpenMP threads to be used for each function call
4040 - gpus_per_core (int): number of GPUs per worker - defaults to 0
4141 - cwd (str/None): current working directory where the parallel python task is executed
42- - openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and
43- SLURM only) - default False
4442 - slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
4543 - error_log_file (str): Name of the error log file to use for storing exceptions raised
4644 by the Python functions submitted to the Executor.
@@ -68,6 +66,7 @@ class SlurmClusterExecutor(BaseExecutor):
6866 export_workflow_filename (str): Name of the file to store the exported workflow graph in.
6967 log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
7068 wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.
69+             openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
7170
7271 Examples:
7372 ```
@@ -108,6 +107,7 @@ def __init__(
108107 export_workflow_filename : Optional [str ] = None ,
109108 log_obj_size : bool = False ,
110109 wait : bool = True ,
110+ openmpi_oversubscribe : bool = False ,
111111 ):
112112 """
113113 The executorlib.SlurmClusterExecutor leverages either the message passing interface (MPI), the SLURM workload
@@ -127,8 +127,6 @@ def __init__(
127127 - threads_per_core (int): number of OpenMP threads to be used for each function call
128128 - gpus_per_core (int): number of GPUs per worker - defaults to 0
129129 - cwd (str/None): current working directory where the parallel python task is executed
130- - openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI
131- and SLURM only) - default False
132130 - slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM
133131 only)
134132 - error_log_file (str): Name of the error log file to use for storing exceptions
@@ -156,14 +154,15 @@ def __init__(
156154 export_workflow_filename (str): Name of the file to store the exported workflow graph in.
157155 log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
158156 wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.
157+            openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
159158
160159 """
161160 default_resource_dict : dict = {
162161 "cores" : 1 ,
163162 "threads_per_core" : 1 ,
164163 "gpus_per_core" : 0 ,
165164 "cwd" : None ,
166- "openmpi_oversubscribe" : False ,
165+ "openmpi_oversubscribe" : openmpi_oversubscribe ,
167166 "slurm_cmd_args" : [],
168167 }
169168 if resource_dict is None :
@@ -260,8 +259,6 @@ class SlurmJobExecutor(BaseExecutor):
260259 - threads_per_core (int): number of OpenMP threads to be used for each function call
261260 - gpus_per_core (int): number of GPUs per worker - defaults to 0
262261 - cwd (str/None): current working directory where the parallel python task is executed
263- - openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and
264- SLURM only) - default False
265262 - slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM only)
266263 - num_nodes (int, optional): The number of compute nodes to use for executing the task.
267264 Defaults to None.
@@ -291,6 +288,7 @@ class SlurmJobExecutor(BaseExecutor):
291288 export_workflow_filename (str): Name of the file to store the exported workflow graph in.
292289 log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
293290 wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.
291+            openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
294292
295293 Examples:
296294 ```
@@ -330,6 +328,7 @@ def __init__(
330328 export_workflow_filename : Optional [str ] = None ,
331329 log_obj_size : bool = False ,
332330 wait : bool = True ,
331+ openmpi_oversubscribe : bool = False ,
333332 ):
334333 """
335334 The executorlib.SlurmJobExecutor leverages either the message passing interface (MPI), the SLURM workload
@@ -349,8 +348,6 @@ def __init__(
349348 - threads_per_core (int): number of OpenMP threads to be used for each function call
350349 - gpus_per_core (int): number of GPUs per worker - defaults to 0
351350 - cwd (str/None): current working directory where the parallel python task is executed
352- - openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI
353- and SLURM only) - default False
354351 - slurm_cmd_args (list): Additional command line arguments for the srun call (SLURM
355352 only)
356353 - num_nodes (int, optional): The number of compute nodes to use for executing the task.
@@ -381,14 +378,15 @@ def __init__(
381378 export_workflow_filename (str): Name of the file to store the exported workflow graph in.
382379 log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
383380 wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.
381+            openmpi_oversubscribe (bool): adds the `--oversubscribe` command line flag (OpenMPI and SLURM only) - default False
384382
385383 """
386384 default_resource_dict : dict = {
387385 "cores" : 1 ,
388386 "threads_per_core" : 1 ,
389387 "gpus_per_core" : 0 ,
390388 "cwd" : None ,
391- "openmpi_oversubscribe" : False ,
389+ "openmpi_oversubscribe" : openmpi_oversubscribe ,
392390 "slurm_cmd_args" : [],
393391 }
394392 if resource_dict is None :
0 commit comments