diff --git a/tests/integration/kubernetes/k8s-guest-pull-image.bats b/tests/integration/kubernetes/k8s-guest-pull-image.bats
index 7bd4b881758a..e572dc312bc5 100644
--- a/tests/integration/kubernetes/k8s-guest-pull-image.bats
+++ b/tests/integration/kubernetes/k8s-guest-pull-image.bats
@@ -1,5 +1,6 @@
 #!/usr/bin/env bats
 # Copyright (c) 2023 Intel Corporation
+# Copyright (c) 2023 IBM Corporation
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -11,28 +12,142 @@ load "${BATS_TEST_DIRNAME}/tests_common.sh"
 setup() {
     [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest"
     setup_common
+    get_pod_config_dir
+}
+
+@test "Test can pull an unencrypted image outside the guest with runc" {
+    pod_name="busybox-pod-runc"
+    set_node "${pod_config_dir}/busybox-pod-runc.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/busybox-pod-runc.yaml"
+
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    echo "Runc pod $pod_name is running"
 }
 
 @test "Test can pull an unencrypted image inside the guest" {
-    pod_config="$(new_pod_config quay.io/prometheus/busybox:latest "kata-${KATA_HYPERVISOR}")"
+    pod_name="busybox-pod-with-nydus"
+    set_node "${pod_config_dir}/busybox-pod-with-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/busybox-pod-with-nydus.yaml"
 
-    kubectl create -f "${pod_config}"
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
 
+    echo "Kata pod $pod_name with nydus annotation is running"
+    echo "Check the image was not pulled in the host"
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+    echo "sandbox_id is $sandbox_id"
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count is $count"
+    [ $count -le 1 ]
+}
+
+@test "Test can pull an unencrypted image inside the guest again" {
+    pod_name="busybox-pod-with-nydus"
+    set_node "${pod_config_dir}/busybox-pod-with-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/busybox-pod-with-nydus.yaml"
     # Get pod specification
-    kubectl wait --for=condition=Ready --timeout=$timeout pod "test-e2e"
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
 
+    echo "Kata pod $pod_name with nydus annotation is running"
+    echo "Check the image was not pulled in the host"
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+    echo "sandbox_id is $sandbox_id"
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count is $count"
+    [ $count -le 1 ]
+}
+
+@test "Test can pull an unencrypted image outside the guest" {
+    pod_name="busybox-pod-without-nydus"
+    set_node "${pod_config_dir}/busybox-pod-without-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/busybox-pod-without-nydus.yaml"
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+    echo "Kata pod $pod_name without nydus annotation is running"
+
+    # TODO:
+    # The snapshotter used for the first pull of the busybox image is reused for all subsequent pulls of that image.
+    # More details: https://github.com/kata-containers/kata-containers/issues/8337
+    # The behavior should be updated once we use containerd 2.0 with the 'image pull per runtime class' feature:
+    # https://github.com/containerd/containerd/issues/9377
     echo "Check the image was not pulled in the host"
-    local pod_id=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
-    sandbox_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
-    rootfs=($(find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared \
-        -name rootfs))
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+    echo "sandbox_id is $sandbox_id"
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count is $count"
+    [ $count -le 1 ]
+}
 
-    [ ${#rootfs[@]} -le 1 ]
+@test "Test pull another unencrypted image outside the guest" {
+    pod_name="alpine-pod-without-nydus"
+    set_node "${pod_config_dir}/alpine-pod-without-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/alpine-pod-without-nydus.yaml"
+
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    echo "Kata pod $pod_name without nydus annotation is running"
+    echo "Check the image was pulled in the host"
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+    echo "sandbox_id is $sandbox_id"
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count is $count"
+    [ $count -eq 2 ]
+}
+
+@test "Test pull another unencrypted image outside the guest again" {
+    pod_name="alpine-pod-without-nydus"
+    set_node "${pod_config_dir}/alpine-pod-without-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/alpine-pod-without-nydus.yaml"
+
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    echo "Kata pod $pod_name without nydus annotation is running"
+    echo "Check the image was pulled in the host"
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
+    echo "sandbox_id is $sandbox_id"
+    allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
+    echo "allrootfs is $allrootfs"
+    count=$(echo $allrootfs | grep -o "rootfs" | wc -l)
+    echo "count is $count"
+    [ $count -eq 2 ]
+}
+
+@test "Test pull another unencrypted image inside the guest" {
+    pod_name="alpine-pod-with-nydus"
+    set_node "${pod_config_dir}/alpine-pod-with-nydus.yaml" "$node"
+    kubectl create -f "${pod_config_dir}/alpine-pod-with-nydus.yaml"
+
+    # Get pod specification
+    kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
+
+    echo "Kata pod $pod_name with nydus annotation is running"
+
+    # TODO:
+    # The snapshotter used for the first pull of the alpine image is reused for all subsequent pulls of that image.
+    # More details: https://github.com/kata-containers/kata-containers/issues/8337
+    # The behavior should be updated once we use containerd 2.0 with the 'image pull per runtime class' feature:
+    # https://github.com/containerd/containerd/issues/9377
+
+    echo "Check the image was pulled in the host"
+    sandbox_id=$(exec_host $node "ps -ef | grep containerd-shim-kata-v2" | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
"\s\-id [a-z0-9]+" | awk '{print $2}') + echo "sandbox_id is $sandbox_id" + allrootfs=$(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs") + echo "allrootfs is $allrootfs" + count=$(echo $allrootfs | grep -o "rootfs" | wc -l) + echo "count is $count" + [ $count -eq 2 ] } teardown() { [[ "${PULL_TYPE}" =~ "guest-pull" ]] || skip "Test only working for pulling image inside the guest" - - kubectl describe -f "${pod_config}" || true - kubectl delete -f "${pod_config}" || true -} \ No newline at end of file + kubectl delete -f $pod_name || true +} diff --git a/tests/integration/kubernetes/run_kubernetes_tests.sh b/tests/integration/kubernetes/run_kubernetes_tests.sh index 235ed6bea902..acf5a89172d6 100644 --- a/tests/integration/kubernetes/run_kubernetes_tests.sh +++ b/tests/integration/kubernetes/run_kubernetes_tests.sh @@ -20,7 +20,12 @@ ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY:-$(base64 -w 0 runtimeclass_workloads_work/ if [ -n "${K8S_TEST_UNION:-}" ]; then K8S_TEST_UNION=($K8S_TEST_UNION) else + # Before we use containerd 2.0 with 'image pull per runtime class' feature + # we need run k8s-guest-pull-image.bats test first, otherwise the test result will be affected + # by other cases which are using 'alpine' and 'quay.io/prometheus/busybox:latest' image. + # more details https://github.com/kata-containers/kata-containers/issues/8337 K8S_TEST_SMALL_HOST_UNION=( \ + "k8s-guest-pull-image.bats" \ "k8s-confidential.bats" \ "k8s-attach-handlers.bats" \ "k8s-caps.bats" \ @@ -33,7 +38,6 @@ else "k8s-env.bats" \ "k8s-exec.bats" \ "k8s-file-volume.bats" \ - "k8s-guest-pull-image.bats" \ "k8s-inotify.bats" \ "k8s-job.bats" \ "k8s-kill-all-process-in-container.bats" \ diff --git a/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-with-nydus.yaml b/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-with-nydus.yaml new file mode 100644 index 000000000000..ac4606a588fc --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-with-nydus.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2023 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: alpine-pod-with-nydus + annotations: + io.containerd.cri.runtime-handler: kata +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + containers: + - name: alpine-container-with-nydus + image: alpine + command: + - "sleep" + - "30" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-without-nydus.yaml b/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-without-nydus.yaml new file mode 100644 index 000000000000..a01bd58ce486 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/alpine-pod-without-nydus.yaml @@ -0,0 +1,19 @@ +# +# Copyright (c) 2023 IBM Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +apiVersion: v1 +kind: Pod +metadata: + name: alpine-pod-without-nydus +spec: + terminationGracePeriodSeconds: 0 + shareProcessNamespace: true + runtimeClassName: kata + containers: + - name: alpine-container-without-nydus + image: alpine + command: + - "sleep" + - "3000" diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-runc.yaml b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-runc.yaml new file mode 100644 index 000000000000..ed230f147cb4 --- /dev/null +++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-runc.yaml @@ -0,0 +1,18 @@ +# +# Copyright (c) 2023 IBM 
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-pod-runc
+spec:
+  terminationGracePeriodSeconds: 0
+  shareProcessNamespace: true
+  containers:
+    - name: busybox-container-runc
+      image: quay.io/prometheus/busybox:latest
+      command:
+        - sleep
+        - "30"
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-with-nydus.yaml b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-with-nydus.yaml
new file mode 100644
index 000000000000..63ba6e7c42ef
--- /dev/null
+++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-with-nydus.yaml
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2023 IBM Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-pod-with-nydus
+  annotations:
+    io.containerd.cri.runtime-handler: kata
+spec:
+  terminationGracePeriodSeconds: 0
+  shareProcessNamespace: true
+  runtimeClassName: kata
+  containers:
+    - name: busybox-container-with-nydus
+      image: quay.io/prometheus/busybox:latest
+      command:
+        - sleep
+        - "30"
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-without-nydus.yaml b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-without-nydus.yaml
new file mode 100644
index 000000000000..1faf2b58b97e
--- /dev/null
+++ b/tests/integration/kubernetes/runtimeclass_workloads/busybox-pod-without-nydus.yaml
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2023 IBM Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox-pod-without-nydus
+spec:
+  terminationGracePeriodSeconds: 0
+  shareProcessNamespace: true
+  runtimeClassName: kata
+  containers:
+    - name: busybox-container-without-nydus
+      image: quay.io/prometheus/busybox:latest
+      command:
+        - sleep
+        - "30"
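Note on the host-side check: every test in the bats file above inlines the same probe (find the shim's sandbox id, list the rootfs directories shared with the host, count them). A helper along the following lines could be factored into tests_common.sh; this is only a sketch and not part of the patch. The name assert_rootfs_count and its argument order are hypothetical, while exec_host, $node, egrep pattern and the /run/kata-containers/shared/sandboxes path are taken verbatim from the tests above.

# Sketch only (not part of the patch): count how many rootfs directories the
# newest kata sandbox on $node shares with the host and assert on the result.
# Usage, mirroring the assertions above:
#   assert_rootfs_count "$node" -le 1   # image pulled inside the guest
#   assert_rootfs_count "$node" -eq 2   # image pulled on the host
assert_rootfs_count() {
    local node="$1"
    local op="$2"        # a test(1) operator such as -le or -eq
    local expected="$3"

    # Same pipeline the tests run inline: grab the "-id" argument of the shim process.
    local sandbox_id
    sandbox_id=$(exec_host "$node" "ps -ef | grep containerd-shim-kata-v2" \
        | egrep -o "\s\-id [a-z0-9]+" | awk '{print $2}')
    echo "sandbox_id is $sandbox_id"

    # Count the rootfs directories shared with the host for this sandbox.
    local allrootfs count
    allrootfs=$(exec_host "$node" "find /run/kata-containers/shared/sandboxes/${sandbox_id}/shared -name rootfs")
    echo "allrootfs is $allrootfs"
    count=$(echo "$allrootfs" | grep -o "rootfs" | wc -l)
    echo "count is $count"

    [ "$count" "$op" "$expected" ]
}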