Format shell scripts
triarius committed Oct 9, 2023
1 parent 70c164f commit 545ed67
Showing 28 changed files with 168 additions and 170 deletions.
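
The changes below apply a consistent machine-formatted style to the repository's shell scripts: `;` sits directly against the closing `]]` before `then`, heredoc and other redirections drop their padding spaces (`cat <<EOF >"$mapping_file"`, `<<<"$subnets"`), single-line case arms end in ` ;;`, and multi-line `if`/`then` and continuation lines are re-indented. The commit does not name the formatter, but the style matches the default output of a tool such as shfmt; the commands below are a hypothetical sketch under that assumption.

# Hypothetical: reformat every tracked shell script in place with shfmt's defaults.
git ls-files '*.sh' | xargs shfmt -w

# Hypothetical CI check: print a diff and exit non-zero if any script is unformatted.
git ls-files '*.sh' | xargs shfmt -d
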
8 changes: 4 additions & 4 deletions .buildkite/steps/cleanup.sh
@@ -2,7 +2,7 @@
# shellcheck disable=SC2016
set -uxo pipefail

if [[ $OSTYPE =~ ^darwin ]] ; then
if [[ $OSTYPE =~ ^darwin ]]; then
cutoff_date=$(gdate --date='-1 days' +%Y-%m-%d)
cutoff_date_milli=$(gdate --date='-1 days' +%s%3N)
else
@@ -15,7 +15,7 @@ echo "--- Cleaning up resources older than ${cutoff_date}"
echo "--- Deleting test managed secrets buckets created"
aws s3api list-buckets \
--output text \
--query "$(printf 'Buckets[?CreationDate<`%s`].[Name]' "$cutoff_date" )" \
--query "$(printf 'Buckets[?CreationDate<`%s`].[Name]' "$cutoff_date")" \
| xargs -n1 \
| grep -E 'buildkite-aws-stack-test-.*-managedsecretsbucket' \
| xargs -n1 -t -I% aws s3 rb s3://% --force
@@ -25,14 +25,14 @@ aws s3api list-buckets \
echo "--- Deleting old lambda logs after ${cutoff_date_milli}"
aws logs describe-log-groups \
--log-group-name-prefix "/aws/lambda/buildkite-aws-stack-test-" \
--query "$(printf 'logGroups[?creationTime<`%s`].[logGroupName]' "$cutoff_date_milli" )" \
--query "$(printf 'logGroups[?creationTime<`%s`].[logGroupName]' "$cutoff_date_milli")" \
--output text \
| xargs -n1 -t -I% aws logs delete-log-group --log-group-name "%"

echo "--- Deleting old cloudformation stacks"
aws cloudformation describe-stacks \
--output text \
--query "$(printf 'Stacks[?CreationTime<`%s`].[StackName]' "$cutoff_date" )" \
--query "$(printf 'Stacks[?CreationTime<`%s`].[StackName]' "$cutoff_date")" \
| xargs -n1 \
| grep -E 'buildkite-aws-stack-test-(linux|windows)-(amd64|arm64)-[[:digit:]]+|buildkite-elastic-ci-stack-service-role-[[:digit:]]+' \
| xargs -n1 -t -I% aws cloudformation delete-stack --stack-name "%"
38 changes: 19 additions & 19 deletions .buildkite/steps/copy.sh
@@ -25,7 +25,7 @@ wait_for_ami_to_be_available() {
local image_state

while true; do
image_state=$(aws ec2 describe-images --region "$region" --image-ids "$image_id" --output text --query 'Images[*].State');
image_state=$(aws ec2 describe-images --region "$region" --image-ids "$image_id" --output text --query 'Images[*].State')
echo "$image_id ($region) is $image_state"

if [[ "$image_state" == "available" ]]; then
@@ -43,10 +43,10 @@ get_image_name() {
local region="$2"

aws ec2 describe-images \
--image-ids "$image_id" \
--output text \
--region "$region" \
--query 'Images[*].Name'
--image-ids "$image_id" \
--output text \
--region "$region" \
--query 'Images[*].Name'
}

make_ami_public() {
@@ -59,7 +59,7 @@ make_ami_public() {
--launch-permission "{\"Add\": [{\"Group\":\"all\"}]}"
}

if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_BUCKET for temporary files"
exit 1
fi
@@ -99,17 +99,17 @@ source_region="${AWS_REGION}"
mapping_file="build/mappings.yml"

# Read the source images from meta-data if no arguments are provided
if [ $# -eq 0 ] ; then
linux_amd64_source_image_id=$(buildkite-agent meta-data get "linux_amd64_image_id")
linux_arm64_source_image_id=$(buildkite-agent meta-data get "linux_arm64_image_id")
windows_amd64_source_image_id=$(buildkite-agent meta-data get "windows_amd64_image_id")
if [ $# -eq 0 ]; then
linux_amd64_source_image_id=$(buildkite-agent meta-data get "linux_amd64_image_id")
linux_arm64_source_image_id=$(buildkite-agent meta-data get "linux_arm64_image_id")
windows_amd64_source_image_id=$(buildkite-agent meta-data get "windows_amd64_image_id")
fi

# If we're not on the main branch or a tag build skip the copy
if [[ $BUILDKITE_BRANCH != main ]] && [[ $BUILDKITE_TAG != "$BUILDKITE_BRANCH" ]] && [[ ${COPY_TO_ALL_REGIONS:-"false"} != "true" ]]; then
echo "--- Skipping AMI copy on non-main/tag branch " >&2
mkdir -p "$(dirname "$mapping_file")"
cat << EOF > "$mapping_file"
cat <<EOF >"$mapping_file"
Mappings:
AWSRegion2AMI:
${AWS_REGION} : { linuxamd64: $linux_amd64_source_image_id, linuxarm64: $linux_arm64_source_image_id, windows: $windows_amd64_source_image_id }
@@ -125,7 +125,7 @@ s3_mappings_cache=$(printf "s3://%s/mappings-%s-%s-%s-%s.yml" \
"${BUILDKITE_BRANCH}")

# Check if there is a previously copy in the cache bucket
if aws s3 cp "${s3_mappings_cache}" "$mapping_file" ; then
if aws s3 cp "${s3_mappings_cache}" "$mapping_file"; then
echo "--- Skipping AMI copy, was previously copied"
exit 0
fi
@@ -138,7 +138,7 @@ windows_amd64_source_image_name=$(get_image_name "$windows_amd64_source_image_id
# Copy to all other regions
# shellcheck disable=SC2048
for region in ${ALL_REGIONS[*]}; do
if [[ $region != "$source_region" ]] ; then
if [[ $region != "$source_region" ]]; then
echo "--- :linux: Copying Linux AMD64 $linux_amd64_source_image_id to $region" >&2
IMAGES+=("$(copy_ami_to_region "$linux_amd64_source_image_id" "$source_region" "$region" "${linux_amd64_source_image_name}-${region}")")

@@ -154,12 +154,12 @@ done

# Write yaml preamble
mkdir -p "$(dirname "$mapping_file")"
cat << EOF > "$mapping_file"
cat <<EOF >"$mapping_file"
Mappings:
AWSRegion2AMI:
EOF

echo "--- Waiting for AMIs to become available" >&2
echo "--- Waiting for AMIs to become available" >&2
# shellcheck disable=SC2048
for region in ${ALL_REGIONS[*]}; do
linux_amd64_image_id="${IMAGES[0]}"
@@ -169,29 +169,29 @@ for region in ${ALL_REGIONS[*]}; do
wait_for_ami_to_be_available "$linux_amd64_image_id" "$region" >&2

# Make the linux AMI public if it's not the source image
if [[ $linux_amd64_image_id != "$linux_amd64_source_image_id" ]] ; then
if [[ $linux_amd64_image_id != "$linux_amd64_source_image_id" ]]; then
echo ":linux: Making Linux AMD64 ${linux_amd64_image_id} public" >&2
make_ami_public "$linux_amd64_image_id" "$region"
fi

wait_for_ami_to_be_available "$linux_arm64_image_id" "$region" >&2

# Make the linux ARM AMI public if it's not the source image
if [[ $linux_arm64_image_id != "$linux_arm64_source_image_id" ]] ; then
if [[ $linux_arm64_image_id != "$linux_arm64_source_image_id" ]]; then
echo ":linux: Making Linux ARM64 ${linux_arm64_image_id} public" >&2
make_ami_public "$linux_arm64_image_id" "$region"
fi

wait_for_ami_to_be_available "$windows_amd64_image_id" "$region" >&2

# Make the windows AMI public if it's not the source image
if [[ $windows_amd64_image_id != "$windows_amd64_source_image_id" ]] ; then
if [[ $windows_amd64_image_id != "$windows_amd64_source_image_id" ]]; then
echo ":windows: Making Windows AMD64 ${windows_amd64_image_id} public" >&2
make_ami_public "$windows_amd64_image_id" "$region"
fi

# Write yaml to file
echo " $region : { linuxamd64: $linux_amd64_image_id, linuxarm64: $linux_arm64_image_id, windows: $windows_amd64_image_id }" >> "$mapping_file"
echo " $region : { linuxamd64: $linux_amd64_image_id, linuxarm64: $linux_arm64_image_id, windows: $windows_amd64_image_id }" >>"$mapping_file"

# Shift off the processed images
IMAGES=("${IMAGES[@]:3}")
9 changes: 4 additions & 5 deletions .buildkite/steps/delete-service-role-stack.sh
@@ -2,9 +2,8 @@
set -euo pipefail

service_role_stack="$(buildkite-agent meta-data get service-role-stack-name)"
if [ -n "${service_role_stack}" ]
then
echo "--- Deleting service-role stack $service_role_stack"
aws cloudformation delete-stack --stack-name "$service_role_stack"
aws cloudformation wait stack-delete-complete --stack-name "$service_role_stack"
if [ -n "${service_role_stack}" ]; then
echo "--- Deleting service-role stack $service_role_stack"
aws cloudformation delete-stack --stack-name "$service_role_stack"
aws cloudformation wait stack-delete-complete --stack-name "$service_role_stack"
fi
12 changes: 6 additions & 6 deletions .buildkite/steps/delete.sh
@@ -6,14 +6,14 @@ arch="${2:-amd64}"
stack_name="buildkite-aws-stack-test-${os}-${arch}-${BUILDKITE_BUILD_NUMBER}"

secrets_bucket=$(aws cloudformation describe-stacks \
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsBucket'].OutputValue" \
--output text)
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsBucket'].OutputValue" \
--output text)

secrets_logging_bucket=$(aws cloudformation describe-stacks \
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsLoggingBucket'].OutputValue" \
--output text)
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsLoggingBucket'].OutputValue" \
--output text)

echo "--- Deleting stack $stack_name"
aws cloudformation delete-stack --stack-name "$stack_name"
2 changes: 1 addition & 1 deletion .buildkite/steps/deploy-service-role-stack.sh
@@ -7,4 +7,4 @@ buildkite-agent meta-data set service-role-stack-name "${stack_name}"
aws cloudformation deploy --template-file templates/service-role.yml --stack-name "${stack_name}" --region us-east-1 --capabilities CAPABILITY_IAM

role_arn="$(aws cloudformation describe-stacks --stack-name "${stack_name}" --region us-east-1 --query "Stacks[0].Outputs[?OutputKey=='RoleArn'].OutputValue" --output text)"
buildkite-agent meta-data set service-role-arn "${role_arn}"
buildkite-agent meta-data set service-role-arn "${role_arn}"
6 changes: 3 additions & 3 deletions .buildkite/steps/launch.sh
@@ -13,8 +13,8 @@ chmod +x ./parfait

vpc_id=$(aws ec2 describe-vpcs --filters "Name=isDefault,Values=true" --query "Vpcs[0].VpcId" --output text)
subnets=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$vpc_id" --query "Subnets[*].[SubnetId,AvailabilityZone]" --output text)
subnet_ids=$(awk '{print $1}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
az_ids=$(awk '{print $2}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
subnet_ids=$(awk '{print $1}' <<<"$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
az_ids=$(awk '{print $2}' <<<"$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')

image_id=$(buildkite-agent meta-data get "${os}_${arch}_image_id")
echo "Using AMI $image_id for $os/$arch"
@@ -35,7 +35,7 @@ if [[ "$arch" == "arm64" ]]; then
enable_instance_storage="true"
fi

cat << EOF > config.json
cat <<EOF >config.json
[
{
"ParameterKey": "BuildkiteAgentToken",
4 changes: 2 additions & 2 deletions .buildkite/steps/lint.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -euo pipefail

grep -rl '^#!/.*sh' . | while read -r file ; do
grep -rl '^#!/.*sh' . | while read -r file; do
[[ $file =~ \.git ]] && continue
[[ $file =~ init\.d ]] && continue
[[ $file =~ vendor ]] && continue
@@ -11,4 +11,4 @@ grep -rl '^#!/.*sh' . | while read -r file ; do
echo "Processing $file"
docker run --rm -v "$PWD:/mnt" koalaman/shellcheck "$file"
echo -e "Ok.\\n"
done
done
8 changes: 4 additions & 4 deletions .buildkite/steps/packer.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -euo pipefail

if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_BUCKET for temporary files"
exit 1
fi
@@ -10,13 +10,13 @@ os="${1:-linux}"
arch="${2:-amd64}"
agent_binary="buildkite-agent-${os}-${arch}"

if [[ "$os" == "windows" ]] ; then
if [[ "$os" == "windows" ]]; then
agent_binary+=".exe"
fi

mkdir -p "build/"

if [[ "$os" == "linux" ]] ; then
if [[ "$os" == "linux" ]]; then
buildkite-agent artifact download "build/fix-perms-linux-${arch}" ./build
mv "build/fix-perms-linux-${arch}" packer/linux/conf/buildkite-agent/scripts/fix-buildkite-agent-builds-permissions
chmod 755 packer/linux/conf/buildkite-agent/scripts/fix-buildkite-agent-builds-permissions
@@ -32,7 +32,7 @@ echo "Packer image hash for ${os}/${arch} is ${packer_hash}"
packer_file="packer-${packer_hash}-${os}-${arch}.output"

# Only build packer image if one with the same hash doesn't exist, and we're not being forced
if [[ -n "${PACKER_REBUILD:-}" ]] || ! aws s3 cp "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}" . ; then
if [[ -n "${PACKER_REBUILD:-}" ]] || ! aws s3 cp "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}" .; then
make "packer-${os}-${arch}.output"
aws s3 cp "packer-${os}-${arch}.output" "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}"
mv "packer-${os}-${arch}.output" "${packer_file}"
4 changes: 2 additions & 2 deletions .buildkite/steps/publish.sh
@@ -8,7 +8,7 @@ s3_upload_templates() {
aws s3 cp --content-type 'text/yaml' --acl public-read build/aws-stack.yml "s3://${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}/${bucket_prefix}aws-stack.yml"
}

if [[ -z "${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_TEMPLATE_BUCKET for publishing templates to"
exit 1
fi
@@ -48,7 +48,7 @@ publish_for_branch() {
# Publish each build to a unique URL, to let people roll back to old versions
s3_upload_templates "${branch}/${BUILDKITE_COMMIT}."

cat << EOF | buildkite-agent annotate --style "info"
cat <<EOF | buildkite-agent annotate --style "info"
Published template <a href="https://s3.amazonaws.com/${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}/${branch}/aws-stack.yml">${branch}/aws-stack.yml</a>
EOF
}
2 changes: 1 addition & 1 deletion packer/linux/conf/bin/bk-check-disk-space.sh
@@ -2,7 +2,7 @@
set -euo pipefail

DISK_MIN_AVAILABLE=${DISK_MIN_AVAILABLE:-5242880} # 5GB
DISK_MIN_INODES=${DISK_MIN_INODES:-250000} # docker needs lots
DISK_MIN_INODES=${DISK_MIN_INODES:-250000} # docker needs lots

DOCKER_DIR="$(jq -r '."data-root" // "/var/lib/docker"' /etc/docker/daemon.json)"

16 changes: 8 additions & 8 deletions packer/linux/conf/bin/bk-configure-docker.sh
@@ -42,8 +42,7 @@ if ! docker run \
--pull=never \
--rm \
"tonistiigi/binfmt@${QEMU_BINFMT_DIGEST}" \
--install all
then
--install all; then
echo Failed to install binfmt.
echo Available docker images:
docker image ls
@@ -53,7 +52,7 @@ fi
if [[ "${DOCKER_USERNS_REMAP:-false}" == "true" ]]; then
echo Configuring user namespace remapping...

cat <<< "$(jq '."userns-remap"="buildkite-agent"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
cat <<<"$(jq '."userns-remap"="buildkite-agent"' /etc/docker/daemon.json)" >/etc/docker/daemon.json

echo Writing subuid...
cat <<EOF | tee /etc/subuid
@@ -72,7 +71,7 @@

if [[ "${DOCKER_EXPERIMENTAL:-false}" == "true" ]]; then
echo Configuring experiment flag for docker daemon...
cat <<< "$(jq '.experimental=true' /etc/docker/daemon.json)" > /etc/docker/daemon.json
cat <<<"$(jq '.experimental=true' /etc/docker/daemon.json)" >/etc/docker/daemon.json
else
echo Experiment flag for docker daemon not configured.
fi
@@ -81,15 +80,16 @@ if [[ "${BUILDKITE_ENABLE_INSTANCE_STORAGE:-false}" == "true" ]]; then
echo Creating docker root directory in instance storage...
mkdir -p /mnt/ephemeral/docker
echo Configuring docker root directory to be in instance storage...
cat <<< "$(jq '."data-root"="/mnt/ephemeral/docker"' /etc/docker/daemon.json)" > /etc/docker/daemon.json
cat <<<"$(jq '."data-root"="/mnt/ephemeral/docker"' /etc/docker/daemon.json)" >/etc/docker/daemon.json
else
echo Instance storage not configured.
fi

echo Customising docker IP address pools...
cat <<<"$(jq \
'."default-address-pools"=[{"base":"172.17.0.0/12","size":20},{"base":"192.168.0.0/16","size":24}]' \
/etc/docker/daemon.json \
cat <<<"$(
jq \
'."default-address-pools"=[{"base":"172.17.0.0/12","size":20},{"base":"192.168.0.0/16","size":24}]' \
/etc/docker/daemon.json
)" >/etc/docker/daemon.json

echo Cleaning up docker images...
14 changes: 7 additions & 7 deletions packer/linux/conf/bin/bk-fetch.sh
@@ -5,10 +5,10 @@ FROM="$1"
TO="$2"

case "$FROM" in
s3://*)
exec aws s3 cp "$FROM" "$TO"
;;
*)
exec curl -Lfs -o "$TO" "$FROM"
;;
esac
s3://*)
exec aws s3 cp "$FROM" "$TO"
;;
*)
exec curl -Lfs -o "$TO" "$FROM"
;;
esac
14 changes: 8 additions & 6 deletions packer/linux/conf/bin/bk-install-elastic-stack.sh
@@ -13,8 +13,7 @@ on_error() {
if [[ $exit_code != 0 ]]; then
if ! aws autoscaling set-instance-health \
--instance-id "$INSTANCE_ID" \
--health-status Unhealthy
then
--health-status Unhealthy; then
echo Failed to set instance health to unhealthy.
fi
fi
@@ -72,9 +71,9 @@ check_status() {
check_status

case $(uname -m) in
x86_64) ARCH=amd64;;
aarch64) ARCH=arm64;;
*) ARCH=unknown;;
x86_64) ARCH=amd64 ;;
aarch64) ARCH=arm64 ;;
*) ARCH=unknown ;;
esac
echo "Detected ARCH=$ARCH"

@@ -268,7 +267,10 @@ BUILDKITE_AGENT_TOKEN="$(
cat <<EOF >/etc/buildkite-agent/buildkite-agent.cfg
name="${BUILDKITE_STACK_NAME}-${INSTANCE_ID}-%spawn"
token="${BUILDKITE_AGENT_TOKEN}"
tags=$(IFS=, ; echo "${agent_metadata[*]}")
tags=$(
IFS=,
echo "${agent_metadata[*]}"
)
tags-from-ec2-meta-data=true
no-ansi-timestamps=${BUILDKITE_AGENT_NO_ANSI_TIMESTAMPS}
timestamp-lines=${BUILDKITE_AGENT_TIMESTAMP_LINES}
