Merged
52 changes: 34 additions & 18 deletions pyslurm/pyslurm.pyx
@@ -2182,6 +2182,7 @@ cdef class job:
Job_dict[u'ntasks_per_board'] = self._record.ntasks_per_board
Job_dict[u'num_cpus'] = self._record.num_cpus
Job_dict[u'num_nodes'] = self._record.num_nodes
Job_dict[u'num_tasks'] = self._record.num_tasks

if self._record.pack_job_id:
Job_dict[u'pack_job_id'] = self._record.pack_job_id
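
This hunk exposes num_tasks in the dictionary that job queries return. A minimal sketch of reading it back, assuming a job named as in tests/test-job.py exists; find() and get() are the query calls used elsewhere in this PR:

import pyslurm

# Look up job ids by name (as tests/test-job.py does), then read the
# newly exposed num_tasks field from each job's dictionary.
job_ids = pyslurm.job().find(name="name", val="pyslurm_test_job")
all_jobs = pyslurm.job().get()  # maps job id -> job dictionary
for jid in job_ids:
    print(jid, all_jobs[jid].get("num_tasks"))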
@@ -2580,6 +2581,7 @@ cdef class job:
wckey = job_opts.get("wckey").encode("UTF-8", "replace")
desc.wckey = wckey

# TODO when nodelist is set, min_nodes needs to be adjusted accordingly
if job_opts.get("nodelist"):
req_nodes = job_opts.get("nodelist").encode("UTF-8", "replace")
desc.req_nodes = req_nodes
@@ -2601,7 +2603,12 @@
licenses = job_opts.get("licenses").encode("UTF-8", "replace")
desc.licenses = licenses

# TODO: nodes_set
if job_opts.get("min_nodes"):
desc.min_nodes = job_opts.get("min_nodes")
if job_opts.get("max_nodes"):
desc.max_nodes = job_opts.get("max_nodes")
elif "ntasks" in job_opts and job_opts.get("min_nodes") == 0:
desc.min_nodes = 0

if job_opts.get("ntasks_per_node"):
ntasks_per_node = job_opts.get("ntasks_per_node")
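
The new block above forwards min_nodes and max_nodes from job_opts to the job descriptor. A hedged submission sketch; the wrap and job_name keys follow tests/test-job.py, and the node counts are illustrative:

import pyslurm

# Request between 2 and 4 nodes for the batch job; these keys map onto
# desc.min_nodes / desc.max_nodes in the hunk above.
job_opts = {
    "wrap": "sleep 3600",
    "job_name": "pyslurm_nodes_job",
    "ntasks": 4,
    "min_nodes": 2,
    "max_nodes": 4,
}
job_id = pyslurm.job().submit_batch_job(job_opts)
print("submitted job", job_id)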
@@ -2732,20 +2739,22 @@ cdef class job:
if job_opts.get("tmpdisk"):
desc.pn_min_tmp_disk = job_opts.get("tmpdisk")

# TODO: declare and use MAX macro or use python max()?
# if job_opts.get("overcommit"):
# desc.min_cpus = max(job_opts.get("min_nodes", 1)
# desc.overcommit = job_opts.get("overcommit")
# elif job_opts.get("cpus_set"):
# # TODO: cpus_set
# # check for ntasks and cpus_per_task before multiplying
# desc.min_cpus = job_opts.get("ntasks") * job_opts.get("cpus_per_task")
# elif job_opts.get("nodes_set") and job_opts.get("min_nodes") == 0:
# desc.min_cpus = 0
# else:
# desc.min_cpus = job_opts.get("ntasks")

# TODO: ntasks_set, cpus_set
if job_opts.get("overcommit"):
[Review — giovtorres (Member), Oct 27, 2019]
Could we work on some tests for these changes?

[Reply — Contributor Author]
submit_batch_job does not seem to be tested beyond giving a wrap script and a job_name. I added simple tests for ntasks and cpus_per_task...

desc.min_cpus = max(job_opts.get("min_nodes", 1), 1)
desc.overcommit = job_opts.get("overcommit")
elif job_opts.get("cpus_per_task"):
desc.min_cpus = job_opts.get("ntasks", 1) * job_opts.get("cpus_per_task")
elif job_opts.get("nodelist") and job_opts.get("min_nodes") == 0:
desc.min_cpus = 0
else:
desc.min_cpus = job_opts.get("ntasks", 1)

if job_opts.get("cpus_per_task"):
desc.cpus_per_task = job_opts.get("cpus_per_task")

if job_opts.get("ntasks"):
desc.num_tasks = job_opts.get("ntasks")

if job_opts.get("ntasks_per_socket"):
desc.ntasks_per_socket = job_opts.get("ntasks_per_socket")

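The branching above derives desc.min_cpus from overcommit, cpus_per_task, nodelist, and ntasks. A pure-Python restatement (a hypothetical helper, not part of pyslurm) that makes the four cases easy to check:

def derive_min_cpus(job_opts):
    # Mirrors the desc.min_cpus branching in submit_batch_job.
    if job_opts.get("overcommit"):
        # Overcommit: at least one CPU per requested node, minimum 1.
        return max(job_opts.get("min_nodes", 1), 1)
    if job_opts.get("cpus_per_task"):
        # Default to one task, times the CPUs each task needs.
        return job_opts.get("ntasks", 1) * job_opts.get("cpus_per_task")
    if job_opts.get("nodelist") and job_opts.get("min_nodes") == 0:
        # Explicit nodelist with min_nodes forced to zero.
        return 0
    return job_opts.get("ntasks", 1)

assert derive_min_cpus({"ntasks": 2, "cpus_per_task": 3}) == 6
assert derive_min_cpus({"overcommit": True, "min_nodes": 4}) == 4
assert derive_min_cpus({}) == 1
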
@@ -2833,8 +2842,13 @@ cdef class job:

# FIXME: should this be python's getcwd or C's getcwd?
# also, allow option to specify work_dir, if not, set default
cwd = os.getcwd().encode("UTF-8", "replace")
desc.work_dir = cwd

if job_opts.get("work_dir"):
work_dir = job_opts.get("work_dir").encode("UTF-8", "replace")
desc.work_dir = work_dir
else:
cwd = os.getcwd().encode("UTF-8", "replace")
desc.work_dir = cwd

if job_opts.get("requeue"):
desc.requeue = job_opts.get("requeue")
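
With the work_dir change above, the working directory can be supplied explicitly, and os.getcwd() becomes only the fallback. A hedged usage sketch; the "/tmp" path is illustrative:

import pyslurm

# Run the job from /tmp rather than the submitting process's cwd;
# omitting "work_dir" falls back to os.getcwd() per the hunk above.
job_opts = {
    "wrap": "pwd; sleep 60",
    "job_name": "pyslurm_workdir_job",
    "work_dir": "/tmp",
}
job_id = pyslurm.job().submit_batch_job(job_opts)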
@@ -5561,9 +5575,11 @@ cdef class slurmdb_jobs:

def __cinit__(self):
self.job_cond = <slurm.slurmdb_job_cond_t *>slurm.xmalloc(sizeof(slurm.slurmdb_job_cond_t))
self.db_conn = slurm.slurmdb_connection_get()

def __dealloc__(self):
pass
slurm.xfree(self.job_cond)
slurm.slurmdb_connection_close(&self.db_conn)

def get(self, jobids=[], starttime=0, endtime=0):
u"""Get Slurmdb information about some jobs.
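The __cinit__/__dealloc__ pair now opens a slurmdbd connection when the object is created and releases both the xmalloc'd job condition and the connection when it is destroyed, instead of leaking them. A hedged usage sketch based on the get() signature above, assuming epoch-second integers are accepted for starttime/endtime:

import time
import pyslurm

# The connection is opened in __cinit__ and closed in __dealloc__,
# so no explicit setup or teardown is needed around get().
db = pyslurm.slurmdb_jobs()
now = int(time.time())
jobs = db.get(starttime=now - 3600, endtime=now)
print(len(jobs), "accounting records in the last hour")
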
10 changes: 9 additions & 1 deletion tests/test-job.py
@@ -7,10 +7,17 @@

def test_job_submit():
"""Job: Test job().submit_batch_job()."""
test_job = {"wrap": "sleep 3600", "job_name": "pyslurm_test_job"}
test_job = {
"wrap": "sleep 3600",
"job_name": "pyslurm_test_job",
"ntasks": 2,
"cpus_per_task": 3,
}
test_job_id = pyslurm.job().submit_batch_job(test_job)
test_job_search = pyslurm.job().find(name="name", val="pyslurm_test_job")
assert_true(test_job_id in test_job_search)
assert_equals(test_job_search["cpus_per_task"], 3)
[Review — Member]
💯
assert_equals(test_job_search["num_tasks"], 2)


def test_job_get():
@@ -58,6 +65,7 @@ def test_job_scontrol():
assert_equals(test_job_info["nice"], int(sctl_dict["Nice"]))
assert_equals(test_job_info["num_cpus"], int(sctl_dict["NumCPUs"]))
assert_equals(test_job_info["num_nodes"], int(sctl_dict["NumNodes"]))
assert_equals(test_job_info["num_tasks"], int(sctl_dict["NumTasks"]))
assert_equals(test_job_info["partition"], sctl_dict["Partition"])
assert_equals(test_job_info["priority"], int(sctl_dict["Priority"]))
assert_equals(test_job_info["state_reason"], sctl_dict["Reason"])