fix(bolt): explicitly handle no nomad leader error (#971)
NathanFlurry committed Jul 2, 2024
1 parent 61e9f14 commit 20822fc
Showing 2 changed files with 5 additions and 1 deletion.
2 changes: 1 addition & 1 deletion infra/tf/k8s_infra/nomad.tf
@@ -18,7 +18,7 @@ locals {
 	# This value must be 3, 5, or 7. More = better redundancy, but does not make things faster.
 	#
 	# See https://developer.hashicorp.com/nomad/tutorials/enterprise/production-reference-architecture-vm-with-consul
-	nomad_server_count = 3
+	nomad_server_count = var.deploy_method_cluster ? 3 : 1
 
 	nomad_server_addrs = [for i in range(0, local.nomad_server_count): "127.0.0.1:${6000 + i}"]
 	nomad_server_addrs_escaped = [for addr in local.nomad_server_addrs : "\"${addr}\""]
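To make the Terraform change easier to read in isolation, here is a minimal sketch of the new conditional with the referenced variable declared. The variable block below is an assumption for illustration only; the commit itself only changes the local value.

# Assumed declaration; in the real repository this variable is defined elsewhere in the module.
variable "deploy_method_cluster" {
  type        = bool
  description = "Whether this deployment runs as a multi-node cluster."
}

locals {
  # Keep the recommended 3-server count for cluster deployments (redundancy),
  # but fall back to a single server for single-node deployments.
  nomad_server_count = var.deploy_method_cluster ? 3 : 1
}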
4 changes: 4 additions & 0 deletions lib/bolt/core/src/tasks/test.rs
@@ -766,6 +766,10 @@ async fn cleanup_nomad_test(ctx: &ProjectContext, test_id: &str, purge: bool) ->
 	let output = cmd.output().await?;
 	ensure!(output.status.success());
 
+	if output.stdout == b"No cluster leader" {
+		panic!("no cluster leader");
+	}
+
 	let jobs: Vec<NomadJob> = serde_json::from_slice(&output.stdout)?;
 
 	// Cleanup jobs
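For context, a self-contained sketch of the guard added above, lifted out of the real cleanup_nomad_test function. The NomadJob stand-in type, its fields, and the sample input are assumptions; only the byte-string comparison and the panic mirror the commit.

use anyhow::Result;
use serde::Deserialize;

// Stand-in for bolt's real NomadJob type; the fields here are assumed for illustration.
#[derive(Debug, Deserialize)]
struct NomadJob {
	#[serde(rename = "ID")]
	id: String,
}

// Mirrors the added guard: if the Nomad CLI answered with the literal
// "No cluster leader" message instead of JSON, fail loudly rather than letting
// serde_json surface an opaque parse error about invalid JSON.
fn parse_job_list(stdout: &[u8]) -> Result<Vec<NomadJob>> {
	if stdout == b"No cluster leader" {
		panic!("no cluster leader");
	}
	Ok(serde_json::from_slice(stdout)?)
}

fn main() -> Result<()> {
	let jobs = parse_job_list(br#"[{"ID": "example"}]"#)?;
	for job in &jobs {
		println!("found job {}", job.id);
	}
	Ok(())
}

Note that the exact-equality check only matches when the output is precisely that message, which is also how the committed code behaves; any other non-JSON output still fails at the serde_json parse that follows.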
