Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a step to CI to check files have been formatted with shfmt #1232

Merged
merged 5 commits
Oct 10, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
10 changes: 10 additions & 0 deletions .buildkite/pipeline.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,12 @@ steps:
agents:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"

- label: ":bash: shfmt"
key: fmt
command: .buildkite/steps/shfmt.sh
agents:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"

- id: "fixperms-tests"
name: ":go: fixperms tests"
agents:
Expand Down Expand Up @@ -36,6 +42,7 @@ steps:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"
command: .buildkite/steps/deploy-service-role-stack.sh
depends_on:
- "fmt"
- "lint"
- "fixperms-tests"
- "fixperms-build"
Expand All @@ -48,6 +55,7 @@ steps:
agents:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"
depends_on:
- "fmt"
- "lint"
- "fixperms-tests"
- "fixperms-build"
Expand Down Expand Up @@ -92,6 +100,7 @@ steps:
agents:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"
depends_on:
- "fmt"
- "lint"
- "fixperms-tests"
- "fixperms-build"
Expand Down Expand Up @@ -135,6 +144,7 @@ steps:
agents:
queue: "${BUILDKITE_AGENT_META_DATA_QUEUE}"
depends_on:
- "fmt"
- "lint"
- "fixperms-tests"
- "fixperms-build"
Expand Down
7 changes: 7 additions & 0 deletions .buildkite/scripts/shfmt
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
#
# Wrapper script for shfmt: runs the official mvdan/shfmt Docker image so the
# tool does not need to be installed on the host. All arguments are forwarded
# to shfmt unchanged.

set -Eeuo pipefail

# Run the container as the invoking user so any files shfmt touches in the
# mounted workdir keep correct ownership on the host.
uid_gid="$(id -u):$(id -g)"

exec docker run \
  --rm \
  -u "$uid_gid" \
  -v "$PWD:/mnt" \
  -w /mnt \
  mvdan/shfmt:v3-alpine "$@"
8 changes: 4 additions & 4 deletions .buildkite/steps/cleanup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# shellcheck disable=SC2016
set -uxo pipefail

if [[ $OSTYPE =~ ^darwin ]] ; then
if [[ $OSTYPE =~ ^darwin ]]; then
cutoff_date=$(gdate --date='-1 days' +%Y-%m-%d)
cutoff_date_milli=$(gdate --date='-1 days' +%s%3N)
else
Expand All @@ -15,7 +15,7 @@ echo "--- Cleaning up resources older than ${cutoff_date}"
echo "--- Deleting test managed secrets buckets created"
aws s3api list-buckets \
--output text \
--query "$(printf 'Buckets[?CreationDate<`%s`].[Name]' "$cutoff_date" )" \
--query "$(printf 'Buckets[?CreationDate<`%s`].[Name]' "$cutoff_date")" \
| xargs -n1 \
| grep -E 'buildkite-aws-stack-test-.*-managedsecretsbucket' \
| xargs -n1 -t -I% aws s3 rb s3://% --force
Expand All @@ -25,14 +25,14 @@ aws s3api list-buckets \
echo "--- Deleting old lambda logs after ${cutoff_date_milli}"
aws logs describe-log-groups \
--log-group-name-prefix "/aws/lambda/buildkite-aws-stack-test-" \
--query "$(printf 'logGroups[?creationTime<`%s`].[logGroupName]' "$cutoff_date_milli" )" \
--query "$(printf 'logGroups[?creationTime<`%s`].[logGroupName]' "$cutoff_date_milli")" \
--output text \
| xargs -n1 -t -I% aws logs delete-log-group --log-group-name "%"

echo "--- Deleting old cloudformation stacks"
aws cloudformation describe-stacks \
--output text \
--query "$(printf 'Stacks[?CreationTime<`%s`].[StackName]' "$cutoff_date" )" \
--query "$(printf 'Stacks[?CreationTime<`%s`].[StackName]' "$cutoff_date")" \
| xargs -n1 \
| grep -E 'buildkite-aws-stack-test-(linux|windows)-(amd64|arm64)-[[:digit:]]+|buildkite-elastic-ci-stack-service-role-[[:digit:]]+' \
| xargs -n1 -t -I% aws cloudformation delete-stack --stack-name "%"
Expand Down
38 changes: 19 additions & 19 deletions .buildkite/steps/copy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ wait_for_ami_to_be_available() {
local image_state

while true; do
image_state=$(aws ec2 describe-images --region "$region" --image-ids "$image_id" --output text --query 'Images[*].State');
image_state=$(aws ec2 describe-images --region "$region" --image-ids "$image_id" --output text --query 'Images[*].State')
echo "$image_id ($region) is $image_state"

if [[ "$image_state" == "available" ]]; then
Expand All @@ -43,10 +43,10 @@ get_image_name() {
local region="$2"

aws ec2 describe-images \
--image-ids "$image_id" \
--output text \
--region "$region" \
--query 'Images[*].Name'
--image-ids "$image_id" \
--output text \
--region "$region" \
--query 'Images[*].Name'
}

make_ami_public() {
Expand All @@ -59,7 +59,7 @@ make_ami_public() {
--launch-permission "{\"Add\": [{\"Group\":\"all\"}]}"
}

if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_BUCKET for temporary files"
exit 1
fi
Expand Down Expand Up @@ -99,17 +99,17 @@ source_region="${AWS_REGION}"
mapping_file="build/mappings.yml"

# Read the source images from meta-data if no arguments are provided
if [ $# -eq 0 ] ; then
linux_amd64_source_image_id=$(buildkite-agent meta-data get "linux_amd64_image_id")
linux_arm64_source_image_id=$(buildkite-agent meta-data get "linux_arm64_image_id")
windows_amd64_source_image_id=$(buildkite-agent meta-data get "windows_amd64_image_id")
if [ $# -eq 0 ]; then
linux_amd64_source_image_id=$(buildkite-agent meta-data get "linux_amd64_image_id")
linux_arm64_source_image_id=$(buildkite-agent meta-data get "linux_arm64_image_id")
windows_amd64_source_image_id=$(buildkite-agent meta-data get "windows_amd64_image_id")
fi

# If we're not on the main branch or a tag build skip the copy
if [[ $BUILDKITE_BRANCH != main ]] && [[ $BUILDKITE_TAG != "$BUILDKITE_BRANCH" ]] && [[ ${COPY_TO_ALL_REGIONS:-"false"} != "true" ]]; then
echo "--- Skipping AMI copy on non-main/tag branch " >&2
mkdir -p "$(dirname "$mapping_file")"
cat << EOF > "$mapping_file"
cat <<EOF >"$mapping_file"
Mappings:
AWSRegion2AMI:
${AWS_REGION} : { linuxamd64: $linux_amd64_source_image_id, linuxarm64: $linux_arm64_source_image_id, windows: $windows_amd64_source_image_id }
Expand All @@ -125,7 +125,7 @@ s3_mappings_cache=$(printf "s3://%s/mappings-%s-%s-%s-%s.yml" \
"${BUILDKITE_BRANCH}")

# Check if there is a previously copy in the cache bucket
if aws s3 cp "${s3_mappings_cache}" "$mapping_file" ; then
if aws s3 cp "${s3_mappings_cache}" "$mapping_file"; then
echo "--- Skipping AMI copy, was previously copied"
exit 0
fi
Expand All @@ -138,7 +138,7 @@ windows_amd64_source_image_name=$(get_image_name "$windows_amd64_source_image_id
# Copy to all other regions
# shellcheck disable=SC2048
for region in ${ALL_REGIONS[*]}; do
if [[ $region != "$source_region" ]] ; then
if [[ $region != "$source_region" ]]; then
echo "--- :linux: Copying Linux AMD64 $linux_amd64_source_image_id to $region" >&2
IMAGES+=("$(copy_ami_to_region "$linux_amd64_source_image_id" "$source_region" "$region" "${linux_amd64_source_image_name}-${region}")")

Expand All @@ -154,12 +154,12 @@ done

# Write yaml preamble
mkdir -p "$(dirname "$mapping_file")"
cat << EOF > "$mapping_file"
cat <<EOF >"$mapping_file"
Mappings:
AWSRegion2AMI:
EOF

echo "--- Waiting for AMIs to become available" >&2
echo "--- Waiting for AMIs to become available" >&2
# shellcheck disable=SC2048
for region in ${ALL_REGIONS[*]}; do
linux_amd64_image_id="${IMAGES[0]}"
Expand All @@ -169,29 +169,29 @@ for region in ${ALL_REGIONS[*]}; do
wait_for_ami_to_be_available "$linux_amd64_image_id" "$region" >&2

# Make the linux AMI public if it's not the source image
if [[ $linux_amd64_image_id != "$linux_amd64_source_image_id" ]] ; then
if [[ $linux_amd64_image_id != "$linux_amd64_source_image_id" ]]; then
echo ":linux: Making Linux AMD64 ${linux_amd64_image_id} public" >&2
make_ami_public "$linux_amd64_image_id" "$region"
fi

wait_for_ami_to_be_available "$linux_arm64_image_id" "$region" >&2

# Make the linux ARM AMI public if it's not the source image
if [[ $linux_arm64_image_id != "$linux_arm64_source_image_id" ]] ; then
if [[ $linux_arm64_image_id != "$linux_arm64_source_image_id" ]]; then
echo ":linux: Making Linux ARM64 ${linux_arm64_image_id} public" >&2
make_ami_public "$linux_arm64_image_id" "$region"
fi

wait_for_ami_to_be_available "$windows_amd64_image_id" "$region" >&2

# Make the windows AMI public if it's not the source image
if [[ $windows_amd64_image_id != "$windows_amd64_source_image_id" ]] ; then
if [[ $windows_amd64_image_id != "$windows_amd64_source_image_id" ]]; then
echo ":windows: Making Windows AMD64 ${windows_amd64_image_id} public" >&2
make_ami_public "$windows_amd64_image_id" "$region"
fi

# Write yaml to file
echo " $region : { linuxamd64: $linux_amd64_image_id, linuxarm64: $linux_arm64_image_id, windows: $windows_amd64_image_id }" >> "$mapping_file"
echo " $region : { linuxamd64: $linux_amd64_image_id, linuxarm64: $linux_arm64_image_id, windows: $windows_amd64_image_id }" >>"$mapping_file"

# Shift off the processed images
IMAGES=("${IMAGES[@]:3}")
Expand Down
9 changes: 4 additions & 5 deletions .buildkite/steps/delete-service-role-stack.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,8 @@
set -euo pipefail

service_role_stack="$(buildkite-agent meta-data get service-role-stack-name)"
if [ -n "${service_role_stack}" ]
then
echo "--- Deleting service-role stack $service_role_stack"
aws cloudformation delete-stack --stack-name "$service_role_stack"
aws cloudformation wait stack-delete-complete --stack-name "$service_role_stack"
if [ -n "${service_role_stack}" ]; then
echo "--- Deleting service-role stack $service_role_stack"
aws cloudformation delete-stack --stack-name "$service_role_stack"
aws cloudformation wait stack-delete-complete --stack-name "$service_role_stack"
fi
12 changes: 6 additions & 6 deletions .buildkite/steps/delete.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,14 @@ arch="${2:-amd64}"
stack_name="buildkite-aws-stack-test-${os}-${arch}-${BUILDKITE_BUILD_NUMBER}"

secrets_bucket=$(aws cloudformation describe-stacks \
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsBucket'].OutputValue" \
--output text)
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsBucket'].OutputValue" \
--output text)

secrets_logging_bucket=$(aws cloudformation describe-stacks \
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsLoggingBucket'].OutputValue" \
--output text)
--stack-name "${stack_name}" \
--query "Stacks[0].Outputs[?OutputKey=='ManagedSecretsLoggingBucket'].OutputValue" \
--output text)

echo "--- Deleting stack $stack_name"
aws cloudformation delete-stack --stack-name "$stack_name"
Expand Down
2 changes: 1 addition & 1 deletion .buildkite/steps/deploy-service-role-stack.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,4 @@ buildkite-agent meta-data set service-role-stack-name "${stack_name}"
aws cloudformation deploy --template-file templates/service-role.yml --stack-name "${stack_name}" --region us-east-1 --capabilities CAPABILITY_IAM

role_arn="$(aws cloudformation describe-stacks --stack-name "${stack_name}" --region us-east-1 --query "Stacks[0].Outputs[?OutputKey=='RoleArn'].OutputValue" --output text)"
buildkite-agent meta-data set service-role-arn "${role_arn}"
buildkite-agent meta-data set service-role-arn "${role_arn}"
6 changes: 3 additions & 3 deletions .buildkite/steps/launch.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ chmod +x ./parfait

vpc_id=$(aws ec2 describe-vpcs --filters "Name=isDefault,Values=true" --query "Vpcs[0].VpcId" --output text)
subnets=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$vpc_id" --query "Subnets[*].[SubnetId,AvailabilityZone]" --output text)
subnet_ids=$(awk '{print $1}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
az_ids=$(awk '{print $2}' <<< "$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
subnet_ids=$(awk '{print $1}' <<<"$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')
az_ids=$(awk '{print $2}' <<<"$subnets" | tr ' ' ',' | tr '\n' ',' | sed 's/,$//')

image_id=$(buildkite-agent meta-data get "${os}_${arch}_image_id")
echo "Using AMI $image_id for $os/$arch"
Expand All @@ -35,7 +35,7 @@ if [[ "$arch" == "arm64" ]]; then
enable_instance_storage="true"
fi

cat << EOF > config.json
cat <<EOF >config.json
[
{
"ParameterKey": "BuildkiteAgentToken",
Expand Down
4 changes: 2 additions & 2 deletions .buildkite/steps/lint.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/bin/bash
set -euo pipefail

grep -rl '^#!/.*sh' . | while read -r file ; do
grep -rl '^#!/.*sh' . | while read -r file; do
[[ $file =~ \.git ]] && continue
[[ $file =~ init\.d ]] && continue
[[ $file =~ vendor ]] && continue
Expand All @@ -11,4 +11,4 @@ grep -rl '^#!/.*sh' . | while read -r file ; do
echo "Processing $file"
docker run --rm -v "$PWD:/mnt" koalaman/shellcheck "$file"
echo -e "Ok.\\n"
done
done
8 changes: 4 additions & 4 deletions .buildkite/steps/packer.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/bin/bash
set -euo pipefail

if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_BUCKET for temporary files"
exit 1
fi
Expand All @@ -10,13 +10,13 @@ os="${1:-linux}"
arch="${2:-amd64}"
agent_binary="buildkite-agent-${os}-${arch}"

if [[ "$os" == "windows" ]] ; then
if [[ "$os" == "windows" ]]; then
agent_binary+=".exe"
fi

mkdir -p "build/"

if [[ "$os" == "linux" ]] ; then
if [[ "$os" == "linux" ]]; then
buildkite-agent artifact download "build/fix-perms-linux-${arch}" ./build
mv "build/fix-perms-linux-${arch}" packer/linux/conf/buildkite-agent/scripts/fix-buildkite-agent-builds-permissions
chmod 755 packer/linux/conf/buildkite-agent/scripts/fix-buildkite-agent-builds-permissions
Expand All @@ -32,7 +32,7 @@ echo "Packer image hash for ${os}/${arch} is ${packer_hash}"
packer_file="packer-${packer_hash}-${os}-${arch}.output"

# Only build packer image if one with the same hash doesn't exist, and we're not being forced
if [[ -n "${PACKER_REBUILD:-}" ]] || ! aws s3 cp "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}" . ; then
if [[ -n "${PACKER_REBUILD:-}" ]] || ! aws s3 cp "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}" .; then
make "packer-${os}-${arch}.output"
aws s3 cp "packer-${os}-${arch}.output" "s3://${BUILDKITE_AWS_STACK_BUCKET}/${packer_file}"
mv "packer-${os}-${arch}.output" "${packer_file}"
Expand Down
4 changes: 2 additions & 2 deletions .buildkite/steps/publish.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ s3_upload_templates() {
aws s3 cp --content-type 'text/yaml' --acl public-read build/aws-stack.yml "s3://${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}/${bucket_prefix}aws-stack.yml"
}

if [[ -z "${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}" ]] ; then
if [[ -z "${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}" ]]; then
echo "Must set an s3 bucket in BUILDKITE_AWS_STACK_TEMPLATE_BUCKET for publishing templates to"
exit 1
fi
Expand Down Expand Up @@ -48,7 +48,7 @@ publish_for_branch() {
# Publish each build to a unique URL, to let people roll back to old versions
s3_upload_templates "${branch}/${BUILDKITE_COMMIT}."

cat << EOF | buildkite-agent annotate --style "info"
cat <<EOF | buildkite-agent annotate --style "info"
Published template <a href="https://s3.amazonaws.com/${BUILDKITE_AWS_STACK_TEMPLATE_BUCKET}/${branch}/aws-stack.yml">${branch}/aws-stack.yml</a>
EOF
}
Expand Down
10 changes: 10 additions & 0 deletions .buildkite/steps/shfmt.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# CI step: verify that every shell script in the repository is formatted with
# shfmt. With --diff, shfmt exits non-zero and prints the required changes for
# any unformatted file, which fails this step under `set -e`/pipefail.

set -Eeuo pipefail

# Repo-local wrapper that runs shfmt inside Docker.
shfmt_cmd=".buildkite/scripts/shfmt"

# Find all shell files, then check their formatting.
# The plugins/ directory is excluded because it contains git submodules.
"$shfmt_cmd" --find . |
  grep -v '^plugins/' |
  xargs "$shfmt_cmd" --diff --binary-next-line --indent 2
2 changes: 1 addition & 1 deletion packer/linux/conf/bin/bk-check-disk-space.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
set -euo pipefail

DISK_MIN_AVAILABLE=${DISK_MIN_AVAILABLE:-5242880} # 5GB
DISK_MIN_INODES=${DISK_MIN_INODES:-250000} # docker needs lots
DISK_MIN_INODES=${DISK_MIN_INODES:-250000} # docker needs lots

DOCKER_DIR="$(jq -r '."data-root" // "/var/lib/docker"' /etc/docker/daemon.json)"

Expand Down