
Commit

wb | remove Cardano World QA profiles
fmaste authored and mgmeier committed May 8, 2024
1 parent 4a3d57e commit aa92293
Showing 5 changed files with 143 additions and 258 deletions.
86 changes: 52 additions & 34 deletions nix/workbench/backend/nomad-job.nix
@@ -190,23 +190,45 @@ let
# namespace can be specified either with the flag -namespace or read from
# the NOMAD_NAMESPACE environment variable."
# https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces
namespace = "perf"; # Default to "perf" to avoid errors were possible.
namespace = null;
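# With null the job spec leaves the namespace unset; it is then expected to
# be supplied at submit time via the -namespace flag or the NOMAD_NAMESPACE
# environment variable, as the documentation quoted above describes.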

# The region in which to execute the job.
region = "global"; # SRE: They are actually using global.

# A list of datacenters in the region which are eligible for task
# A list of datacenters in the region which are eligible for task
# placement. This must be provided, and does not have a default.
# What we currently have available:
# - Cardano World cluster: "eu-central-1", "us-east-2"
# - Dedicated P&T cluster: "eu-central-1", "us-east-1", and "ap-southeast-2"
datacenters = [ "ap-southeast-2" "eu-central-1" "us-east-1" "us-east-2" ];
datacenters = lib.lists.unique # The regions of the nodes to deploy.
(lib.attrsets.mapAttrsToList
(name: value: value.region)
profileData.node-specs.value
)
;
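# For illustration, a minimal sketch (with a hypothetical node-specs value)
# of what the expression above produces, assuming two regions:
#   profileData.node-specs.value = {
#     node-0 = { region = "eu-central-1"; };
#     node-1 = { region = "eu-central-1"; };
#     node-2 = { region = "us-east-1";    };
#   };
# mapAttrsToList extracts [ "eu-central-1" "eu-central-1" "us-east-1" ]
# and lib.lists.unique reduces it to [ "eu-central-1" "us-east-1" ].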

# Specifies user-defined constraints on the task. This can be provided
# multiple times to define additional constraints.
# Cloud runs set the distinct hosts constraint here but local runs can't
# because we are only starting one Nomad client.
constraint = null; # Values are appended inside the workbench (bash).
# Cloud runs set the distinct hosts constraint; local runs don't because
# we are only starting one Nomad client.
constraint = # A list, values are appended inside the workbench (bash).
if builtins.all (r: r == "loopback")
(lib.attrsets.mapAttrsToList
(name: value: value.region)
profileData.node-specs.value
)
then []
# Unique placement:
## "distinct_hosts": Instructs the scheduler to not co-locate any groups
## on the same machine. When specified as a job constraint, it applies
## to all groups in the job. When specified as a group constraint, the
## effect is constrained to that group. This constraint can not be
## specified at the task level. Note that the attribute parameter should
## be omitted when using this constraint.
## https://developer.hashicorp.com/nomad/docs/job-specification/constraint#distinct_hosts
else [
{ operator = "distinct_hosts";
value = "true";
}
]
;
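# Sketch of the two possible results of the expression above: a purely
# local run (every node region is "loopback") yields [], while any cloud
# run yields
#   [ { operator = "distinct_hosts"; value = "true"; } ]
# so no two task groups of the job are placed on the same Nomad client.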

# The reschedule stanza specifies the group's rescheduling strategy. If
# specified at the job level, the configuration will apply to all groups
@@ -347,30 +369,22 @@ let
# for a set of nodes. Affinities may be expressed on attributes or
# client metadata. Additionally affinities may be specified at the
# job, group, or task levels for ultimate flexibility.
affinity =
let region = nodeSpec.region;
in if region == null || region == "loopback"
then null
else
{ attribute = "\${node.datacenter}";
value = region;
}
;
affinity = null; # Remember: AFFINITY != CONSTRAINT

# This can be provided multiple times to define additional constraints.
# See the Nomad constraint reference for more details.
# https://developer.hashicorp.com/nomad/docs/job-specification/constraint
constraint = {
attribute = "\${node.class}";
operator = "=";
# Cloud jobs can run in the dedicated P&T Nomad cluster on AWS or in
# Cardano World Nomad cluster's "qa" class nodes.
# This default is just a precaution, like the top level namespace,
# because "qa" Class nodes usage must be limited to short test and
# "infra" Class nodes, that are used for HA jobs, must be avoided
# entirely.
value = "perf";
};
constraint = # A list, values are appended inside the workbench (bash).
let region = nodeSpec.region;
in if region == null || region == "loopback"
then []
else
[
{ attribute = "\${node.datacenter}";
value = region;
}
]
;
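# Sketch, assuming a hypothetical nodeSpec with region = "eu-central-1":
# the expression above evaluates to
#   [ { attribute = "${node.datacenter}"; value = "eu-central-1"; } ]
# i.e. the former datacenter affinity becomes a hard constraint, while
# loopback/local nodes get an empty list.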

# The network stanza specifies the networking requirements for the task
# group, including the network mode and port allocations.
@@ -500,12 +514,16 @@ let
SSL_CERT_FILE = "${containerSpecs.containerPkgs.cacert.nix-store-path}/etc/ssl/certs/ca-bundle.crt";
};

# Sensible defaults to run cloud version of "default", "ci-test" and
# "ci-bench" in Cardano World Nomad cluster's "qa" class nodes.
# For benchmarking the dedicated P&T Nomad cluster on AWS is used and
# this value should be updated accordingly.
# Sensible defaults.
# These values were chosen to run the cloud versions of the "default",
# "ci-test" and "ci-bench" profiles on the Cardano World Nomad cluster's
# "qa" class nodes. That cluster is no longer available, but the values
# are kept as a precaution because the Nomad Cloud workbench backend can
# be used with custom/private Nomad clusters.
# For benchmarking on the dedicated P&T Nomad cluster this value should
# be updated accordingly.
resources = {
# Task can only ask for 'cpu' or 'cores' resource but not both.
# Tasks can only ask for 'cpu' or 'cores' resource but not both.
cores = 2; # cpu = 512;
memory = 1024*4; # memory_max = 32768;
};
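# For illustration only (an assumption, not part of the file above): the
# commented-out alternative would use plain CPU shares instead of reserved
# cores, e.g.
#   resources = { cpu = 512; memory = 1024*4; };
# since a Nomad task may request either 'cpu' or 'cores', never both.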
11 changes: 2 additions & 9 deletions nix/workbench/backend/nomad.sh
@@ -76,6 +76,7 @@ backend_nomad() {
* ) break;; esac; shift; done

# Create the dispatcher's local directories hierarchy.
mkdir "${dir}"/nomad
backend_nomad allocate-run-directory-nomad "${dir}"
backend_nomad allocate-run-directory-supervisor "${dir}"
backend_nomad allocate-run-directory-tracers "${dir}"
@@ -88,13 +89,6 @@
local nomad_task_driver=$(envjqr 'nomad_task_driver')
# TODO: Store them on disk for later subcommands run from a different shell.
# echo "{\"nomad_environment\": $nomad_environment, }" > "$dir"/env.json

# Update the Nomad Job specs file accordingly
## - Job Name
### Must match `^[a-zA-Z0-9-]{1,128}$)` or it won't be possible to use it
### as namespace.: "invalid name "2023-02-10-06.34.f178b.ci-test-bage.nom"".
local nomad_job_name=$(basename "${dir}")
backend_nomad allocate-run-nomad-job-patch-name "${dir}" "${nomad_job_name}"
;;

allocate-run-directory-nomad )
@@ -3339,8 +3333,7 @@ nomad_create_client_config() {
region = "global"
# Specifies the data center of the local agent. All members of a datacenter
# should share a local LAN connection.
# Use one of "eu-central-1", "eu-west-1" or "us-east-2" to mimic SRE
datacenter = "eu-central-1"
datacenter = "loopback"
# Specifies the name of the local node. This value is used to identify
# individual agents. When specified on a server, the name must be unique within
# the region.
