8337: add test cases for guest pull images
- add test cases for guest pull images
- needs revisiting after we move to containerd 2.0 with the 'image pull per runtime class' feature

for kata-containers#8337 and kata-containers#8407

Signed-off-by: Da Li Liu <liudali@cn.ibm.com>
liudalibj committed Dec 15, 2023
1 parent c05a4ec commit 0ef0ed8
Showing 4 changed files with 226 additions and 22 deletions.
162 changes: 145 additions & 17 deletions tests/integration/kubernetes/k8s-guest-pull-image.bats
@@ -1,5 +1,6 @@
#!/usr/bin/env bats
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -9,30 +10,157 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"

setup() {
[[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
[[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
setup_common
get_pod_config_dir
unencrypted_image_1="quay.io/prometheus/busybox:latest"
unencrypted_image_2="alpine:latest"
}

@test "Test can pull an unencrypted image inside the guest" {
pod_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
@test "Test we can pull an unencrypted image outside the guest with runc and then inside the guest successfully" {
# 1. Create one runc pod with the $unencrypted_image_1 image
# We want one runc pod, so we pass a fake runtimeclass "runc" and then delete the runtimeClassName,
# because the runtimeclass argument is not optional in the new_pod_config function.
runc_pod_config="$(new_pod_config "$unencrypted_image_1" "runc" "$node")"
sed -i '/runtimeClassName:/d' $runc_pod_config
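# With runtimeClassName removed, the pod falls back to the cluster's default
# runtime, which is runc in this setup.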

kubectl create -f "${pod_config}"
set_container_command "$runc_pod_config" "0" "sleep" "30"

# Get pod specification
kubectl wait --for=condition=Ready --timeout=$timeout pod "test-e2e"
# For debug sake
echo "Pod $runc_pod_config file:"
cat $runc_pod_config

echo "Check the image was not pulled in the host"
local pod_id=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
sandbox_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
rootfs=($(find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared \
-name rootfs))
k8s_create_pod "$runc_pod_config"

[ ${#rootfs[@]} -le 1 ]
echo "Runc pod test-e2e is running"
kubectl delete -f "$runc_pod_config"

# 2. Create one kata pod with the $unencrypted_image_1 image and nydus annotation
kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}" "$node")"
set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

# Set annotation to pull image in guest
set_metadata_annotation "$kata_pod_with_nydus_config" \
"io.containerd.cri.runtime-handler" \
"kata-${KATA_HYPERVISOR}"

# For debug sake
echo "Pod $kata_pod_with_nydus_config file:"
cat $kata_pod_with_nydus_config

k8s_create_pod "$kata_pod_with_nydus_config"
echo "Kata pod test-e2e with nydus annotation is running"

echo "Checking the image was pulled in the guest"
sandbox_id=$(get_node_kata_sandbox_id $node)
echo "sandbox_id is: $sandbox_id"
# With annotation for nydus, only rootfs for pause container can be found on host
assert_rootfs_count "$node" "$sandbox_id" "1"
}

teardown() {
[[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
@test "Test we can pull an unencrypted image inside the guest twice in a row and then outside the guest successfully" {
# 1. Create one kata pod with the $unencrypted_image_1 image and nydus annotation twice
kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}" "$node")"
set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

# Set annotation to pull image in guest
set_metadata_annotation "$kata_pod_with_nydus_config" \
"io.containerd.cri.runtime-handler" \
"kata-${KATA_HYPERVISOR}"

# For debug sake
echo "Pod $kata_pod_with_nydus_config file:"
cat $kata_pod_with_nydus_config

k8s_create_pod "$kata_pod_with_nydus_config"

echo "Kata pod test-e2e with nydus annotation is running"
echo "Checking the image was pulled in the guest"

sandbox_id=$(get_node_kata_sandbox_id $node)
echo "sandbox_id is: $sandbox_id"
# With annotation for nydus, only rootfs for pause container can be found on host
assert_rootfs_count "$node" "$sandbox_id" "1"

kubectl delete -f $kata_pod_with_nydus_config

# 2. Create one kata pod with the $unencrypted_image_1 image and without nydus annotation
kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}" "$node")"
set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

# For debug sake
echo "Pod $kata_pod_without_nydus_config file:"
cat $kata_pod_without_nydus_config

k8s_create_pod "$kata_pod_without_nydus_config"

echo "Kata pod test-e2e without nydus annotation is running"
echo "Check the image was not pulled in the guest"
sandbox_id=$(get_node_kata_sandbox_id $node)
echo "sandbox_id is: $sandbox_id"

kubectl describe -f "${pod_config}" || true
kubectl delete -f "${pod_config}" || true
}
# The assert_rootfs_count will FAIL.
# The expected count of rootfs on the host is "2" but the found count is "1",
# because once we pull the $unencrypted_image_1 image via the nydus-snapshotter in the guest,
# all subsequent pulls still use the nydus-snapshotter in the guest.
# More details: https://github.com/kata-containers/kata-containers/issues/8337
# The test case will PASS after we use containerd 2.0 with the 'image pull per runtime class' feature:
# https://github.com/containerd/containerd/issues/9377
assert_rootfs_count "$node" "$sandbox_id" "2"
}

@test "Test we can pull an other unencrypted image outside the guest and then inside the guest successfully" {
# 1. Create one kata pod with the $unencrypted_image_2 image and without nydus annotation
kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}" "$node")"
set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

# For debug sake
echo "Pod $kata_pod_without_nydus_config file:"
cat $kata_pod_without_nydus_config

k8s_create_pod "$kata_pod_without_nydus_config"

echo "Kata pod test-e2e without nydus annotation is running"
echo "Checking the image was pulled in the host"

sandbox_id=$(get_node_kata_sandbox_id $node)
echo "sandbox_id is: $sandbox_id"
# Without annotation for nydus, both rootfs for pause and the test container can be found on host
assert_rootfs_count "$node" "$sandbox_id" "2"

kubectl delete -f $kata_pod_without_nydus_config

# 2. Create one kata pod with the $unencrypted_image_2 image and with nydus annotation
kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}" "$node")"
set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

# Set annotation to pull image in guest
set_metadata_annotation "$kata_pod_with_nydus_config" \
"io.containerd.cri.runtime-handler" \
"kata-${KATA_HYPERVISOR}"

# For debug sake
echo "Pod $kata_pod_with_nydus_config file:"
cat $kata_pod_with_nydus_config

k8s_create_pod "$kata_pod_with_nydus_config"

echo "Kata pod test-e2e with nydus annotation is running"
echo "Checking the image was pulled in the guest"
sandbox_id=$(get_node_kata_sandbox_id $node)
echo "sandbox_id is: $sandbox_id"

# The assert_rootfs_count will FAIL.
# The expected count of rootfs on the host is "1" but the found count is "2",
# because once we pull the $unencrypted_image_2 image via the overlayfs-snapshotter on the host,
# all subsequent pulls still use the overlayfs-snapshotter on the host.
# More details: https://github.com/kata-containers/kata-containers/issues/8337
# The test case will PASS after we use containerd 2.0 with the 'image pull per runtime class' feature:
# https://github.com/containerd/containerd/issues/9377
assert_rootfs_count "$node" "$sandbox_id" "1"
}

teardown() {
[[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
k8s_delete_all_pods_if_any_exists || true
}
4 changes: 1 addition & 3 deletions tests/integration/kubernetes/k8s-measured-rootfs.bats
@@ -28,7 +28,7 @@ teardown() {
}

@test "Test cannnot launch pod with measured boot enabled and incorrect hash" {
pod_config="$(new_pod_config nginx "kata-${KATA_HYPERVISOR}")"
pod_config="$(new_pod_config nginx "kata-${KATA_HYPERVISOR}" "$node")"

incorrect_hash="5180b1568c2ba972e4e06ee0a55976acae8329f2a5d1d2004395635e1ec4a76e"

@@ -38,8 +38,6 @@ teardown() {
set_metadata_annotation "$pod_config" \
"io.katacontainers.config.hypervisor.kernel_params" \
"rootfs_verity.scheme=dm-verity rootfs_verity.hash=$incorrect_hash"
# Run on a specific node so we know from where to inspect the logs
set_node "$pod_config" "$node"

# For debug sake
echo "Pod $pod_config file:"
76 changes: 75 additions & 1 deletion tests/integration/kubernetes/lib.sh
@@ -94,11 +94,41 @@ assert_pod_fail() {
! k8s_create_pod "$container_config" || /bin/false
}


# Check the count of pulled rootfs on the host for the given node and sandbox_id
#
# Parameters:
# $1 - the k8s worker node name
# $2 - the sandbox id for kata container
# $3 - the expected count of pulled rootfs
#
assert_rootfs_count() {
local node="$1"
local sandbox_id="$2"
local expect_count="$3"
local allrootfs=""
# Loop up to 3 times to get all pulled rootfs for the given sandbox_id
for _ in {1..3}
do
allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
if [ -n "$allrootfs" ]; then
break
else
sleep 1
fi
done
echo "allrootfs is: $allrootfs"
count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
echo "count of container rootfs in host is: $count, expect count is: $expect_count"
[ $expect_count -eq $count ]
}
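
# Usage sketch, with values as used by the tests in this commit:
#   sandbox_id=$(get_node_kata_sandbox_id "$node")
#   assert_rootfs_count "$node" "$sandbox_id" "1"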

# Create a pod configuration out of a template file.
#
# Parameters:
# $1 - the container image.
# $2 - the runtimeclass
# $2 - the runtimeclass (not optional).
# $3 - the specific node name, optional.
#
# Return:
# the path to the configuration file. The caller should not care about
@@ -109,13 +139,19 @@ new_pod_config() {
local base_config="${FIXTURES_DIR}/pod-config.yaml.in"
local image="$1"
local runtimeclass="$2"
local node="$3"
local new_config

# The runtimeclass is not optional.
[ -n "$runtimeclass" ] || return 1

new_config=$(mktemp "${BATS_FILE_TMPDIR}/$(basename "${base_config}").XXX")
IMAGE="$image" RUNTIMECLASS="$runtimeclass" envsubst < "$base_config" > "$new_config"

# The node parameter is optional.
# Run on a specific node so we know from where to inspect the logs or check container rootfs
[ -n "$node" ] && set_node "$new_config" "$node"

echo "$new_config"
}
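
# Usage sketch, with image and runtimeclass values borrowed from the tests in
# this commit (the node argument may be omitted):
#   pod_config="$(new_pod_config "alpine:latest" "kata-${KATA_HYPERVISOR}" "$node")"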

@@ -150,6 +186,22 @@ set_metadata_annotation() {
yq w -i --style=double "${yaml}" "${annotation_key}" "${value}"
}

# Set the command for container spec.
#
# Parameters:
# $1 - the yaml file
# $2 - the index of the container
# $N - the command values
#
set_container_command() {
local yaml="${1}"
local container_idx="${2}"
shift 2
for command_value in "$@"; do
yq w -i "${yaml}" "spec.containers[${container_idx}].command[+]" --tag '!!str' "${command_value}"
done
}
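
# Usage sketch, as called by the tests in this commit; this appends "sleep" and
# "30" so that spec.containers[0].command becomes ["sleep", "30"]:
#   set_container_command "$pod_config" "0" "sleep" "30"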

# Set the node name on configuration spec.
#
# Parameters:
@@ -183,3 +235,25 @@ print_node_journal() {
kubectl get pods -o name | grep "node-debugger-${node}" | \
xargs kubectl delete > /dev/null
}


# Get the sandbox id of the kata container from a worker node
#
# Parameters:
# $1 - the k8s worker node name
#
get_node_kata_sandbox_id() {
local node="$1"
local kata_sandbox_id=""
# Loop up to 3 times to get the kata_sandbox_id
for _ in {1..3}
do
kata_sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
if [ -n "$kata_sandbox_id" ]; then
break
else
sleep 1
fi
done
echo $kata_sandbox_id
}
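
# Usage sketch, as called by the tests in this commit; note this assumes a
# single kata sandbox on the node, since with more than one shim running all
# matching ids are echoed:
#   sandbox_id=$(get_node_kata_sandbox_id "$node")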
6 changes: 5 additions & 1 deletion tests/integration/kubernetes/run_kubernetes_tests.sh
@@ -20,7 +20,12 @@ ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY:-$(base64 -w 0 runtimeclass_workloads_work/
if [ -n "${K8S_TEST_UNION:-}" ]; then
K8S_TEST_UNION=($K8S_TEST_UNION)
else
# Before we use containerd 2.0 with the 'image pull per runtime class' feature
# we need to run the k8s-guest-pull-image.bats test first, otherwise the test result will be
# affected by other cases which use the 'alpine' and 'quay.io/prometheus/busybox:latest' images.
# More details: https://github.com/kata-containers/kata-containers/issues/8337
K8S_TEST_SMALL_HOST_UNION=( \
"k8s-guest-pull-image.bats" \
"k8s-confidential.bats" \
"k8s-attach-handlers.bats" \
"k8s-caps.bats" \
@@ -33,7 +38,6 @@ else
"k8s-env.bats" \
"k8s-exec.bats" \
"k8s-file-volume.bats" \
"k8s-guest-pull-image.bats" \
"k8s-inotify.bats" \
"k8s-job.bats" \
"k8s-kill-all-process-in-container.bats" \
