job-list: misc cleanup #5144

Merged
merged 8 commits into from May 7, 2023
9 changes: 5 additions & 4 deletions doc/man1/flux-jobs.rst
@@ -46,10 +46,11 @@ OPTIONS
Limit output to N jobs (default 1000)

**--since**\ *WHEN*
Limit output to jobs that completed or have become inactive since a
given timestamp. This option implies ``-a`` if no other ``--filter``
options are specified. If *WHEN* begins with ``-`` character, then
the remainder is considered to be a an offset in Flux standard duration
Limit output to jobs that have been active since a given timestamp. In other
words, jobs that are currently pending, currently running, or became inactive
since the given timestamp. This option implies ``-a`` if no other
``--filter`` options are specified. If *WHEN* begins with a ``-`` character,
then the remainder is considered to be an offset in Flux standard duration
(RFC 23). Otherwise, any datetime expression accepted by the Python
`parsedatetime <https://github.com/bear/parsedatetime>`_ module is
accepted. Examples: "-6h", "-1d", "yesterday", "2021-06-21 6am",
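For illustration only, here is a minimal Python sketch of how a WHEN value like the examples above could be turned into a UNIX timestamp. This is not the flux-jobs implementation: when_to_timestamp is a hypothetical helper, and the leading-``-`` branch is a simplified stand-in for full RFC 23 duration parsing.

    import time
    import parsedatetime

    def when_to_timestamp(when, now=None):
        """Convert a --since style WHEN value to a UNIX timestamp (sketch only)."""
        now = time.time() if now is None else now
        if when.startswith("-"):
            # Treat the remainder as a duration offset, e.g. "-6h" means six
            # hours ago.  Only a few suffixes are handled in this sketch.
            units = {"s": 1, "m": 60, "h": 3600, "d": 86400}
            value, suffix = when[1:-1], when[-1]
            return now - float(value) * units[suffix]
        # Otherwise fall back to parsedatetime for expressions like "yesterday".
        cal = parsedatetime.Calendar()
        time_struct, status = cal.parse(when)
        if status == 0:
            raise ValueError("could not parse: %s" % when)
        return time.mktime(time_struct)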
5 changes: 0 additions & 5 deletions src/bindings/python/flux/job/list.py
@@ -236,11 +236,6 @@ def set_user(self, user):
def add_filter(self, fname):
"""Append a state or result filter to JobList query"""
fname = fname.lower()
if fname == "all":
self.states |= self.STATES["pending"]
self.states |= self.STATES["running"]
return

if fname in self.STATES:
self.states |= self.STATES[fname]
elif fname in self.RESULTS:
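As a usage note, here is a hedged sketch of how a caller might request the same jobs the removed "all" special case used to cover, by passing the pending and running filters explicitly. Apart from the filters argument, the JobList construction and attribute names shown here are assumptions, not taken from this PR.

    import flux
    from flux.job import JobList

    h = flux.Flux()
    # Ask for pending and running jobs explicitly rather than relying on an
    # "all" alias inside add_filter().
    joblist = JobList(h, filters=["pending", "running"])
    for job in joblist.jobs():
        print(job.id, job.status)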
24 changes: 12 additions & 12 deletions src/cmd/flux-job.c
@@ -1381,7 +1381,7 @@
flux_future_destroy (f);
flux_close (h);

return (0);
return (0);
}

int cmd_list_inactive (optparse_t *p, int argc, char **argv)
@@ -2566,7 +2566,7 @@
else if (streq (name, "submit")) {
if (!(ctx->exec_eventlog_f = flux_job_event_watch (ctx->h,
ctx->id,
"guest.exec.eventlog",
"guest.exec.eventlog",
0)))
log_err_exit ("flux_job_event_watch");
if (flux_future_then (ctx->exec_eventlog_f,
@@ -2855,12 +2855,12 @@
else if (verbose > 1 || exitcode != 0) {
if (!exception)
log_msg ("%s: exited with exit code %d",
jobid,
exitcode);
jobid,
exitcode);
else
log_msg ("%s: exception type=%s",
jobid,
exc_type);
jobid,
exc_type);
}
}
flux_future_destroy (futures[i]);
@@ -3601,10 +3601,10 @@
"id", id,
"volatile", optparse_hasopt (p, "volatile"),
"memo", memo)))
log_err_exit ("flux_rpc_pack");
log_err_exit ("flux_rpc_pack");

Codecov (codecov/patch) check warning on line 3604 in src/cmd/flux-job.c: Added line #L3604 was not covered by tests.

if (flux_rpc_get (f, NULL) < 0)
log_msg_exit ("memo: %s", future_strerror (f, errno));
log_msg_exit ("memo: %s", future_strerror (f, errno));

flux_future_destroy (f);
flux_close (h);
@@ -3782,10 +3782,10 @@
log_err_exit ("flux_open");

if (!(f = flux_rpc_pack (h, "job-info.lookup", FLUX_NODEID_ANY, 0,
"{s:I s:[s] s:i}",
"id", id,
"keys", "R",
"flags", 0)))
"{s:I s:[s] s:i}",
"id", id,
"keys", "R",
"flags", 0)))
log_err_exit ("flux_rpc_pack");
if (flux_rpc_get_unpack (f, "{s:s}", "R", &R) < 0
|| !(rl = rlist_from_R (R))
10 changes: 7 additions & 3 deletions src/modules/job-list/list.c
@@ -79,9 +79,13 @@ int get_jobs_from_list (json_t *jobs,
job = zlistx_first (list);
while (job) {

/* If job->t_inactive > 0. (we're on the inactive jobs list),
* and job->t_inactive > since, then we're done since inactive
* jobs are sorted by inactive time.
/* If job->t_inactive > 0., we're on the inactive jobs list, where jobs
 * are sorted with larger t_inactive first.
 *
 * If job->t_inactive > since, this is a job that could potentially be
 * returned.
 *
 * So if job->t_inactive <= since, we're done, because the rest of the
 * inactive jobs cannot be returned.
*/
if (job->t_inactive > 0. && job->t_inactive <= since)
break;
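To make the reworded comment concrete, here is a small Python sketch (not the job-list module code) of the same traversal, assuming a list of inactive jobs sorted by t_inactive with the largest value first:

    def jobs_inactive_since(inactive_jobs, since):
        """inactive_jobs is sorted by t_inactive, most recently inactive first."""
        results = []
        for job in inactive_jobs:
            # Once a job went inactive at or before `since`, every remaining
            # job is older still, so stop scanning.
            if job.t_inactive <= since:
                break
            results.append(job)
        return results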
3 changes: 0 additions & 3 deletions src/modules/job-list/list.h
@@ -16,9 +16,6 @@
void list_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg);

void list_inactive_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg);

void list_id_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg);

34 changes: 30 additions & 4 deletions t/t2250-job-archive.t
@@ -171,6 +171,11 @@ test_expect_success 'job-archive: load module' '
flux module load job-archive
'

test_expect_success 'job-archive: launch a running job' '
jobid=`flux submit sleep inf` &&
echo $jobid > long_running_job.id
'

test_expect_success 'job-archive: stores inactive job info (job good)' '
jobid=`flux submit hostname` &&
fj_wait_event $jobid clean &&
@@ -189,6 +194,27 @@ test_expect_success 'job-archive: stores inactive job info (job fail)' '
db_check_values_run $jobid ${ARCHIVEDB}
'

# ensure long running job wasn't stored
test_expect_success 'job-archive: check 2 jobs stored' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 2
'

test_expect_success 'job-archive: cancel long running job' '
jobid=$(cat long_running_job.id) &&
flux cancel $jobid &&
fj_wait_event $jobid clean &&
wait_jobid_state $jobid inactive &&
wait_db $jobid ${ARCHIVEDB} &&
db_check_entries $jobid ${ARCHIVEDB} &&
db_check_values_run $jobid ${ARCHIVEDB}
'

test_expect_success 'job-archive: check 3 jobs stored' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 3
'

# to ensure job canceled before we run, we submit a job to eat up all
# resources first.
test_expect_success 'job-archive: stores inactive job info (job cancel)' '
@@ -217,7 +243,7 @@ test_expect_success 'job-archive: stores inactive job info (resources)' '

test_expect_success 'job-archive: all jobs stored' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 5
test $count -eq 6
'

test_expect_success 'job-archive: reload module' '
@@ -226,7 +252,7 @@ test_expect_success 'job-archive: reload module' '

test_expect_success 'job-archive: doesnt restore old data' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 5
test $count -eq 6
'

test_expect_success 'job-archive: stores more inactive job info' '
@@ -246,7 +272,7 @@ test_expect_success 'job-archive: stores more inactive job info' '

test_expect_success 'job-archive: all jobs stored' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 7
test $count -eq 8
'

# we don't check values in module stats b/c it can be racy w/ polling
@@ -260,7 +286,7 @@ test_expect_success 'job-archive: unload module' '

test_expect_success 'job-archive: db exists after module unloaded' '
count=`db_count_entries ${ARCHIVEDB}` &&
test $count -eq 7
test $count -eq 8
'

test_expect_success 'job-archive: setup config file without dbpath' '
4 changes: 2 additions & 2 deletions t/t2260-job-list.t
@@ -185,8 +185,8 @@ test_expect_success 'flux job list inactive jobs results are correct' '
test_cmp list_result_I.out list_result_I.exp
'

# Hard code results values for these tests, as we did not add a results
# option to flux_job_list() or the flux-job command.
# flux job list does not take results as an option, so test via a direct
# call to job-list.list

test_expect_success 'flux job list only canceled jobs' '
id=$(id -u) &&
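For context, here is a hedged Python sketch of the kind of direct job-list query these tests rely on, selecting only canceled jobs. The binding, parameter, and constant names are assumptions about the flux Python bindings, not verified against this PR.

    import flux
    import flux.constants
    from flux.job import job_list

    h = flux.Flux()
    # Query the job-list module directly for inactive jobs whose result was
    # CANCELED, a selection the flux job list command does not expose.
    rpc = job_list(h,
                   max_entries=1000,
                   attrs=["all"],
                   states=flux.constants.FLUX_JOB_STATE_INACTIVE,
                   results=flux.constants.FLUX_JOB_RESULT_CANCELED)
    jobs = rpc.get_jobs()
    print("%d canceled jobs" % len(jobs))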
2 changes: 1 addition & 1 deletion t/t2800-jobs-cmd.t
@@ -182,7 +182,7 @@ test_expect_success 'flux-jobs: custom format with numeric spec works' '
test_expect_success 'flux-jobs: collapsible fields work' '
flux jobs -ao "{id.f58:<12} ?:{exception.type:>8}" >nocollapse.out &&
flux jobs -f running,completed \
-ao "{id.f58:<12} ?:{exception.type:>8}" >collapsed.out &&
-o "{id.f58:<12} ?:{exception.type:>8}" >collapsed.out &&
test_debug "head -n1 nocollapse.out" &&
test_debug "head -n1 collapsed.out" &&
grep EXCEPTION-TYPE nocollapse.out &&