Python executable from config file #623

Merged (5 commits, Feb 9, 2024)
10 changes: 7 additions & 3 deletions dask_jobqueue/core.py
@@ -177,7 +177,7 @@ def __init__(
         job_directives_skip=None,
         log_directory=None,
         shebang=None,
-        python=sys.executable,
+        python=None,
         job_name=None,
         config_name=None,
     ):
@@ -206,6 +206,11 @@ def __init__(
                 )
             )
 
+        if python is None:
+            python = dask.config.get("jobqueue.%s.python" % self.config_name)
+        if python is None:
+            python = sys.executable
+
         if job_name is None:
             job_name = dask.config.get("jobqueue.%s.name" % self.config_name)
         if processes is None:
@@ -339,8 +344,7 @@ def __init__(
 
         # dask-worker command line build
         dask_worker_command = "%(python)s -m %(worker_command)s" % dict(
-            python=python,
-            worker_command=worker_command
+            python=python, worker_command=worker_command
         )
 
         command_args = [dask_worker_command, self.scheduler]
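
Taken together, the worker interpreter is now resolved in three steps: an explicit `python=` keyword argument wins, then the `jobqueue.<config-name>.python` config entry, and finally `sys.executable`. A minimal standalone sketch of the same fallback chain (the `resolve_python` helper and the `"slurm"` default are illustrative, not part of the patch):

    import sys

    import dask

    def resolve_python(python=None, config_name="slurm"):
        """Mirror the fallback chain added to __init__ above."""
        if python is None:
            # The config entry defaults to null in jobqueue.yaml, i.e. None here;
            # default=None keeps the sketch runnable without that file loaded.
            python = dask.config.get("jobqueue.%s.python" % config_name, default=None)
        if python is None:
            # Last resort: the interpreter running the current process.
            python = sys.executable
        return python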
8 changes: 8 additions & 0 deletions dask_jobqueue/jobqueue.yaml
@@ -7,6 +7,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -40,6 +41,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -72,6 +74,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -104,6 +107,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -137,6 +141,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -169,6 +174,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -204,6 +210,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
@@ -234,6 +241,7 @@ jobqueue:
     memory: null # Total amount of memory per job
     processes: null # Number of Python processes per job
 
+    python: null # Python executable
     interface: null # Network interface to use like eth0 or ib0
     death-timeout: 60 # Number of seconds to wait if a worker can not find a scheduler
     local-directory: null # Location of fast local storage like /scratch or $TMPDIR
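
Every scheduler section in jobqueue.yaml gains the same `python: null` default, so the new key resolves uniformly across cluster types. A user could then pin the worker interpreter from their own config file, e.g. `~/.config/dask/jobqueue.yaml`; the path below is purely illustrative:

    jobqueue:
      slurm:
        python: /opt/conda/envs/dask/bin/python  # site-specific interpreter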
1 change: 1 addition & 0 deletions dask_jobqueue/tests/test_htcondor.py
@@ -148,6 +148,7 @@ def test_config_name_htcondor_takes_custom_config():
         "shebang": "#!/usr/bin/env condor_submit",
         "local-directory": "/tmp",
         "shared-temp-directory": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.htcondor-config-name": conf}):
13 changes: 7 additions & 6 deletions dask_jobqueue/tests/test_lsf.py
@@ -95,9 +95,9 @@ def test_job_script():
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
     with LSFCluster(
         queue="general",
@@ -123,9 +123,9 @@ def test_job_script():
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
     with LSFCluster(
         walltime="1:00",
@@ -321,6 +321,7 @@ def test_config_name_lsf_takes_custom_config():
         "log-directory": None,
         "shebang": "#!/usr/bin/env bash",
         "use-stdin": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.lsf-config-name": conf}):
13 changes: 7 additions & 6 deletions dask_jobqueue/tests/test_oar.py
@@ -82,9 +82,9 @@ def test_job_script():
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
     with OARCluster(
         walltime="00:02:00",
@@ -115,9 +115,9 @@
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
 
 def test_config_name_oar_takes_custom_config():
@@ -147,6 +147,7 @@ def test_config_name_oar_takes_custom_config():
         "job-mem": None,
         "resource-spec": None,
         "memory-per-core-property-name": "memcore",
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.oar-config-name": conf}):
13 changes: 7 additions & 6 deletions dask_jobqueue/tests/test_pbs.py
@@ -71,9 +71,9 @@ def test_job_script(Cluster):
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
     with Cluster(
         queue="regular",
@@ -96,9 +96,9 @@ def test_job_script(Cluster):
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
 
 @pytest.mark.env("pbs")
@@ -360,6 +360,7 @@ def test_config_name_pbs_takes_custom_config():
         "job-cpu": None,
         "job-mem": None,
         "resource-spec": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.pbs-config-name": conf}):
1 change: 1 addition & 0 deletions dask_jobqueue/tests/test_sge.py
@@ -66,6 +66,7 @@ def test_config_name_sge_takes_custom_config():
         "job-cpu": None,
         "job-mem": None,
         "resource-spec": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.sge-config-name": conf}):
13 changes: 7 additions & 6 deletions dask_jobqueue/tests/test_slurm.py
@@ -73,9 +73,9 @@ def test_job_script():
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
     with SLURMCluster(
         walltime="00:02:00",
@@ -107,9 +107,9 @@
         in job_script
     )
     formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
-    assert ("--nthreads 2" in job_script)
-    assert ("--nworkers 4" in job_script)
-    assert (f"--memory-limit {formatted_bytes}" in job_script)
+    assert "--nthreads 2" in job_script
+    assert "--nworkers 4" in job_script
+    assert f"--memory-limit {formatted_bytes}" in job_script
 
 
 @pytest.mark.env("slurm")
@@ -196,6 +196,7 @@ def test_config_name_slurm_takes_custom_config():
         "shebang": "#!/usr/bin/env bash",
         "job-cpu": None,
         "job-mem": None,
+        "python": None,
     }
 
     with dask.config.set({"jobqueue.slurm-config-name": conf}):
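
Beyond the tests, the new option can be exercised either through the config system or directly through the constructor; a sketch assuming a SLURM deployment (the interpreter path is a placeholder, and `SLURMCluster` stands in for any of the cluster classes touched above):

    import dask
    from dask_jobqueue import SLURMCluster

    # Via configuration: applies to any cluster created while the value is set.
    with dask.config.set({"jobqueue.slurm.python": "/usr/bin/python3"}):
        cluster = SLURMCluster(cores=2, memory="4GB")
        # The generated job script should now launch the configured interpreter.
        assert "/usr/bin/python3" in cluster.job_script()

    # Via keyword: an explicit argument overrides the config value.
    cluster = SLURMCluster(cores=2, memory="4GB", python="/usr/bin/python3")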