From 005336cd401ab2d3b0adf2393526b8fe3cdc5bbe Mon Sep 17 00:00:00 2001
From: Nicola Sirena
Date: Fri, 18 Aug 2023 17:34:15 +0200
Subject: [PATCH] Reduce wait time while checking MPI jobs

While testing Slurm we also test MPI job submission. The wait time used
to check that the MPI job had started was comparable to the time the job
needed to complete, which led to flaky results. Reducing this wait time
should greatly reduce the chance of such failures.

Signed-off-by: Nicola Sirena
---
 tests/integration-tests/tests/schedulers/test_slurm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration-tests/tests/schedulers/test_slurm.py b/tests/integration-tests/tests/schedulers/test_slurm.py
index 69fa041e17..ef9514964e 100644
--- a/tests/integration-tests/tests/schedulers/test_slurm.py
+++ b/tests/integration-tests/tests/schedulers/test_slurm.py
@@ -1431,7 +1431,7 @@ def _test_mpi_job_termination(remote_command_executor, test_datadir, slurm_comma
 
     # Wait for compute node to start and check that mpi processes are started
     _wait_computefleet_running(region, cluster, remote_command_executor)
-    retry(wait_fixed=seconds(30), stop_max_delay=seconds(500))(_assert_job_state)(
+    retry(wait_fixed=seconds(10), stop_max_delay=seconds(500))(_assert_job_state)(
         slurm_commands, job_id, job_state="RUNNING"
     )
     _check_mpi_process(remote_command_executor, slurm_commands, num_nodes=2, after_completion=False)
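
For readers unfamiliar with the polling pattern being tuned here: the inline
retry(...)(...)(...) call matches the API of the Python "retrying" library,
whose wait_fixed and stop_max_delay arguments are milliseconds, with seconds()
presumably being a small test-suite helper that converts seconds to
milliseconds. Below is a minimal, self-contained sketch of the same pattern
under those assumptions; assert_job_running is a hypothetical stand-in for the
suite's _assert_job_state helper and shells out to squeue directly instead of
going through slurm_commands.

    import subprocess

    from retrying import retry


    def seconds(n):
        # The retrying library takes intervals in milliseconds; this mirrors
        # the seconds() helper the test suite presumably uses for readability.
        return n * 1000


    @retry(wait_fixed=seconds(10), stop_max_delay=seconds(500))
    def assert_job_running(job_id):
        # Poll squeue every 10 s, for at most 500 s overall. The
        # AssertionError raised on any non-RUNNING state is what triggers
        # the next retry attempt.
        state = subprocess.check_output(
            ["squeue", "--job", str(job_id), "--noheader", "--format=%T"],
            text=True,
        ).strip()
        assert state == "RUNNING", f"job {job_id} is in state {state!r}"

The design point of the patch follows directly from this structure: with a
fixed 30 s interval, a short MPI job could finish inside a single polling
window, so the test never observed it in the RUNNING state; a 10 s interval
makes catching the job while it is still running far more likely, while the
500 s overall deadline is unchanged.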