Skip to content

Commit

Permalink
Edge: extract the common functions and put in one place for Edge test…
Browse files Browse the repository at this point in the history
…ing. [litian]
  • Loading branch information
litian1992 committed Mar 5, 2024
1 parent 073e304 commit 46a6019
Show file tree
Hide file tree
Showing 10 changed files with 328 additions and 1,659 deletions.
196 changes: 9 additions & 187 deletions test/cases/ostree-ami-image.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,51 +4,14 @@ set -euo pipefail
# Get OS data (ID, VERSION_ID, etc. used to pick per-distro settings below).
source /etc/os-release
ARCH=$(uname -m)
# Shared helpers for Edge tests (defines CommonInit used below).
source /usr/libexec/tests/osbuild-composer/ostree-common-functions.sh

# Provision the software under test.
/usr/libexec/osbuild-composer-test/provision.sh none

# Shared library: provides greenprint/redprint and friends.
source /usr/libexec/tests/osbuild-composer/shared_lib.sh

# Start libvirtd and test it.
greenprint "🚀 Starting libvirt daemon"
sudo systemctl start libvirtd
# Smoke-test that libvirtd answers; output is discarded, only the exit code matters.
sudo virsh list --all > /dev/null

# Install and start firewalld
greenprint "🔧 Install and start firewalld"
sudo dnf install -y firewalld
sudo systemctl enable --now firewalld

# Set a customized dnsmasq configuration for libvirt so we always get the
# same address on bootup.
# NOTE: the heredoc below is XML fed verbatim to libvirt — the fixed MACs map
# the BIOS VM to .50 and the UEFI VM to .51 so later SSH targets are stable.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
<name>integration</name>
<uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='integration' zone='trusted' stp='on' delay='0'/>
<mac address='52:54:00:36:46:ef'/>
<ip address='192.168.100.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.100.2' end='192.168.100.254'/>
<host mac='34:49:22:B0:83:30' name='vm-bios' ip='192.168.100.50'/>
<host mac='34:49:22:B0:83:31' name='vm-uefi' ip='192.168.100.51'/>
</dhcp>
</ip>
</network>
EOF

# Define the network only if it does not already exist (idempotent re-runs).
if ! sudo virsh net-info integration > /dev/null 2>&1; then
sudo virsh net-define /tmp/integration.xml
fi
# Start it only if it is defined but not active.
if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
sudo virsh net-start integration
fi
# CommonInit comes from ostree-common-functions.sh sourced above —
# presumably shared environment setup for Edge tests; see that file for details.
CommonInit

# Set up variables.
TEST_UUID=$(uuidgen)
Expand All @@ -70,8 +33,8 @@ OBJECT_URL="http://${BUCKET_NAME}.s3.${AWS_DEFAULT_REGION}.amazonaws.com"
# Set up temporary files.
TEMPDIR=$(mktemp -d)
BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
export COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
export COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json

# SSH setup.
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
Expand Down Expand Up @@ -103,149 +66,6 @@ case "${ID}-${VERSION_ID}" in
exit 1;;
esac


# Get the compose log.
# Arguments: $1 - compose UUID
# Globals (read): ARTIFACTS, ID, VERSION_ID
# Outputs: writes the log to ${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-<uuid>.log
get_compose_log () {
    # local keeps these from leaking into (and clobbering) the global
    # COMPOSE_ID that build_image maintains.
    local compose_id=$1
    local log_file=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${compose_id}.log

    # Download the logs; tee persists the file while keeping stdout quiet.
    sudo composer-cli compose log "$compose_id" | tee "$log_file" > /dev/null
}

# Get the compose metadata.
# Arguments: $1 - compose UUID
# Globals (read): ARTIFACTS, ID, VERSION_ID, TEMPDIR
# Outputs: writes pretty-printed JSON metadata to
#          ${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-<uuid>.json
get_compose_metadata () {
    local compose_id=$1
    local metadata_file=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-${compose_id}.json

    # Download the metadata; composer-cli drops a "*-metadata.tar" in $PWD.
    sudo composer-cli compose metadata "$compose_id" > /dev/null

    # Find the tarball and extract it.
    # Declaration split from assignment so a failing find/basename is not
    # masked by the variable declaration's exit status.
    local tarball
    tarball=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
    sudo tar -xf "$tarball" -C "${TEMPDIR}"
    sudo rm -f "$tarball"

    # Pretty-print the JSON into place. jq reads the file directly
    # (no useless `cat`); sudo is kept because the extracted file is root-owned.
    sudo jq -M '.' "${TEMPDIR}/${compose_id}.json" | tee "$metadata_file" > /dev/null
}

# Build ostree image.
# Arguments: $1 - blueprint name
#            $2 - image type (e.g. edge-container, edge-ami)
#            $3 - (optional) ostree repo URL; when given, the compose pulls
#                 the parent commit from it via --url
# Globals (read): OSTREE_REF, COMPOSE_START, COMPOSE_INFO
# Globals (written): WORKER_UNIT, WORKER_JOURNAL_PID, COMPOSE_ID, COMPOSE_STATUS
# Exits the script (status 1) if the compose does not reach FINISHED.
build_image() {
blueprint_name=$1
image_type=$2

# Get worker unit file so we can watch the journal.
WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
WORKER_JOURNAL_PID=$!
# Stop watching the worker journal when exiting.
# (Single quotes: ${WORKER_JOURNAL_PID} expands when the trap fires, not here.)
trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT

# Start the compose.
greenprint "🚀 Starting compose"
if [ $# -eq 3 ]; then
repo_url=$3
sudo composer-cli compose start-ostree \
--json \
--ref "$OSTREE_REF" \
--url "$repo_url" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
else
sudo composer-cli compose start-ostree \
--json \
--ref "$OSTREE_REF" "$blueprint_name" "$image_type" | tee "$COMPOSE_START"
fi
COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START")

# Wait for the compose to finish.
greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
while true; do
sudo composer-cli compose info \
--json \
"${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO")

# Is the compose finished?
if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
break
fi

# Wait 5 seconds and try again.
sleep 5
done

# Capture the compose logs from osbuild.
greenprint "💬 Getting compose log and metadata"
get_compose_log "$COMPOSE_ID"
get_compose_metadata "$COMPOSE_ID"

# Kill the journal monitor immediately and remove the trap
sudo pkill -P ${WORKER_JOURNAL_PID}
trap - EXIT

# Did the compose finish with success?
if [[ $COMPOSE_STATUS != FINISHED ]]; then
redprint "Something went wrong with the compose. 😢"
exit 1
fi
}

# Probe the guest's SSH server.
# Arguments: $1 - host/IP to probe (logs in as "admin")
# Globals (read): SSH_OPTIONS, SSH_KEY
# Outputs: prints "1" on stdout once the server answers READY, otherwise "0".
wait_for_ssh_up () {
    local probe
    probe=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" admin@"${1}" '/bin/bash -c "echo -n READY"')
    # "echo 1" cannot fail, so the &&/|| chain is a safe if/else here.
    [[ "$probe" == READY ]] && echo 1 || echo 0
}

# Clean up our mess.
# Tears down everything the test created, in dependency order:
# libvirt network -> containers/images -> local repo/tmp -> AWS resources.
# Globals (read): PROD_REPO, TEMPDIR, AMI_ID, SNAPSHOT_ID, AMI_KEY_NAME,
#                 INSTANCE_ID (optional), BUCKET_URL
# NOTE(review): under `set -u`, AMI_ID/SNAPSHOT_ID/AMI_KEY_NAME/BUCKET_URL must
# already be set when this runs; only INSTANCE_ID is guarded with [[ -v ]] —
# confirm callers never reach here before those are assigned.
clean_up () {
greenprint "🧼 Cleaning up"

# Clear integration network
sudo virsh net-destroy integration
sudo virsh net-undefine integration

# Remove any status containers if exist
sudo podman ps -a -q --format "{{.ID}}" | sudo xargs --no-run-if-empty podman rm -f
# Remove all images
sudo podman rmi -f -a

# Remove prod repo
sudo rm -rf "$PROD_REPO"

# Remove tmp dir.
sudo rm -rf "$TEMPDIR"

# Stop prod repo http service
sudo systemctl disable --now httpd

# Deregister edge AMI image
aws ec2 deregister-image \
--image-id "${AMI_ID}"

# Remove snapshot (must follow deregistration, which releases the snapshot)
aws ec2 delete-snapshot \
--snapshot-id "${SNAPSHOT_ID}"

# Delete Key Pair
aws ec2 delete-key-pair \
--key-name "${AMI_KEY_NAME}"

# Terminate running instance (INSTANCE_ID is only set once an instance was
# launched, hence the guard), then block until termination completes.
if [[ -v INSTANCE_ID ]]; then
aws ec2 terminate-instances \
--instance-ids "${INSTANCE_ID}"
aws ec2 wait instance-terminated \
--instance-ids "${INSTANCE_ID}"
fi

# Remove bucket content and bucket itself quietly
aws s3 rb "${BUCKET_URL}" --force > /dev/null
}

# Test result checking
check_result () {
greenprint "🎏 Checking for test result"
Expand All @@ -254,6 +74,7 @@ check_result () {
else
redprint "❌ Failed"
clean_up
aws_clean_up
exit 1
fi
}
Expand Down Expand Up @@ -423,7 +244,7 @@ sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve container

# Build container image.
build_image container "${CONTAINER_TYPE}"
build_image -b container -t "${CONTAINER_TYPE}"

# Download the image
greenprint "📥 Downloading the container image"
Expand Down Expand Up @@ -620,7 +441,7 @@ sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve ami

# Build ami.
build_image ami "${AMI_IMAGE_TYPE}" "${PROD_REPO_URL}"
build_image -b ami -t "${AMI_IMAGE_TYPE}" -u "${PROD_REPO_URL}"

# Download the image
greenprint "📥 Downloading the ami image"
Expand Down Expand Up @@ -935,7 +756,7 @@ sudo composer-cli blueprints push "$BLUEPRINT_FILE"
sudo composer-cli blueprints depsolve upgrade

# Build upgrade image.
build_image upgrade "${CONTAINER_TYPE}" "$PROD_REPO_URL"
build_image -b upgrade -t "${CONTAINER_TYPE}" -u "$PROD_REPO_URL"

# Download the image
greenprint "📥 Downloading the upgrade image"
Expand Down Expand Up @@ -1063,5 +884,6 @@ check_result

# Final success clean up
clean_up
aws_clean_up

exit 0

0 comments on commit 46a6019

Please sign in to comment.