diff --git a/hack/testdata/pod-restricted-localhost.yaml b/hack/testdata/pod-restricted-localhost.yaml
new file mode 100644
index 000000000000..adc93d9fc12c
--- /dev/null
+++ b/hack/testdata/pod-restricted-localhost.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    run: target
+  name: target
+spec:
+  securityContext:
+    seccompProfile:
+      type: Localhost
+      localhostProfile: dummy.json
+  containers:
+  - image: busybox
+    name: target
+    command: ["/bin/sh", "-c", "sleep 100"]
+    securityContext:
+      runAsUser: 1000
+      runAsGroup: 1000
+      runAsNonRoot: true
+      allowPrivilegeEscalation: false
+      capabilities:
+        drop:
+        - "ALL"
diff --git a/hack/testdata/pod-restricted-runtime-default.yaml b/hack/testdata/pod-restricted-runtime-default.yaml
new file mode 100644
index 000000000000..a7bc9b6f8c5b
--- /dev/null
+++ b/hack/testdata/pod-restricted-runtime-default.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    run: target
+  name: target
+spec:
+  securityContext:
+    seccompProfile:
+      type: RuntimeDefault
+  containers:
+  - image: busybox
+    name: target
+    command: ["/bin/sh", "-c", "sleep 100"]
+    securityContext:
+      runAsUser: 1000
+      runAsGroup: 1000
+      runAsNonRoot: true
+      allowPrivilegeEscalation: false
+      capabilities:
+        drop:
+        - "ALL"
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
index 8d92dc772aa3..3febf5210f39 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
@@ -289,6 +289,8 @@ func TestGenerateDebugContainer(t *testing.T) {
 					Capabilities: &corev1.Capabilities{
 						Drop: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
@@ -1274,10 +1276,12 @@ func TestGeneratePodCopyWithDebugContainer(t *testing.T) {
 					Image:           "busybox",
 					ImagePullPolicy: corev1.PullIfNotPresent,
 					SecurityContext: &corev1.SecurityContext{
+						RunAsNonRoot: pointer.Bool(true),
 						Capabilities: &corev1.Capabilities{
 							Drop: []corev1.Capability{"ALL"},
 						},
-						RunAsNonRoot: pointer.Bool(true),
+						AllowPrivilegeEscalation: pointer.Bool(false),
+						SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 					},
 				},
 			},
@@ -1646,6 +1650,8 @@ func TestGenerateNodeDebugPod(t *testing.T) {
 					Capabilities: &corev1.Capabilities{
 						Drop: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
index 3684478d5708..656f92bf8b81 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
@@ -176,6 +176,8 @@ func (p *restrictedProfile) Apply(pod *corev1.Pod, containerName string, target
 	clearSecurityContext(pod, containerName)
 	disallowRoot(pod, containerName)
 	dropCapabilities(pod, containerName)
+	disallowPrivilegeEscalation(pod, containerName)
+	setSeccompProfile(pod, containerName)
 
 	switch style {
 	case podCopy:
@@ -343,3 +345,31 @@ func addCapability(c *corev1.Container, capability corev1.Capability) {
 	}
 	c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, capability)
 }
+
+// disallowPrivilegeEscalation sets allowPrivilegeEscalation to false on the named container.
+func disallowPrivilegeEscalation(p *corev1.Pod, containerName string) {
+	podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
+		if c.Name != containerName {
+			return true
+		}
+		if c.SecurityContext == nil {
+			c.SecurityContext = &corev1.SecurityContext{}
+		}
+		c.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false)
+		return false
+	})
+}
+
+// setSeccompProfile applies the RuntimeDefault seccomp profile to the named container.
+func setSeccompProfile(p *corev1.Pod, containerName string) {
+	podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
+		if c.Name != containerName {
+			return true
+		}
+		if c.SecurityContext == nil {
+			c.SecurityContext = &corev1.SecurityContext{}
+		}
+		c.SecurityContext.SeccompProfile = &corev1.SeccompProfile{Type: "RuntimeDefault"}
+		return false
+	})
+}
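
With the two new helpers in place, the restricted profile stacks four container-level mutations: disallow root, drop all capabilities, forbid privilege escalation, and pin the seccomp profile. As an illustrative sketch (not part of the patch), this is the SecurityContext the debug container should end up with, built with the same `corev1` and `k8s.io/utils/pointer` packages the patch already uses; the field values mirror the test expectations above:

```go
// Illustrative sketch only: the SecurityContext the restricted profile is
// expected to leave on the debug container after this change.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func main() {
	sc := &corev1.SecurityContext{
		RunAsNonRoot:             pointer.Bool(true), // set by the pre-existing disallowRoot helper
		AllowPrivilegeEscalation: pointer.Bool(false),
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
		},
		SeccompProfile: &corev1.SeccompProfile{
			Type: corev1.SeccompProfileTypeRuntimeDefault, // the string "RuntimeDefault"
		},
	}
	fmt.Printf("%+v\n", sc)
}
```

Only the last two fields are new in this change; `RunAsNonRoot` and the capability drop come from helpers the profile already called.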
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
index a397fa272ad0..fa45f5105532 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
@@ -347,6 +347,8 @@ func TestRestrictedProfile(t *testing.T) {
 					Capabilities: &corev1.Capabilities{
 						Drop: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
@@ -386,6 +388,8 @@
 					Capabilities: &corev1.Capabilities{
 						Drop: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
@@ -404,6 +408,8 @@
 					Capabilities: &corev1.Capabilities{
 						Add: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
@@ -423,6 +429,8 @@
 					Capabilities: &corev1.Capabilities{
 						Drop: []corev1.Capability{"ALL"},
 					},
+					AllowPrivilegeEscalation: pointer.Bool(false),
+					SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
 				},
 			},
 		},
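
The repeated test expectation above is exactly the container-level portion of the "restricted" Pod Security Standard. As a rough mental model only (real enforcement happens in the pod-security-admission plugin, and pod-level seccomp settings, as in the Localhost fixture, can also satisfy the standard), a hypothetical, simplified checker for these fields might look like:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

// meetsRestricted is a hypothetical stand-in for the container-level checks
// the pod-security-admission plugin performs. It deliberately ignores
// pod-level defaults, which can also satisfy the seccomp requirement.
func meetsRestricted(sc *corev1.SecurityContext) bool {
	if sc == nil {
		return false
	}
	if sc.AllowPrivilegeEscalation == nil || *sc.AllowPrivilegeEscalation {
		return false
	}
	if sc.RunAsNonRoot == nil || !*sc.RunAsNonRoot {
		return false
	}
	dropsAll := false
	if sc.Capabilities != nil {
		for _, c := range sc.Capabilities.Drop {
			if c == "ALL" {
				dropsAll = true
			}
		}
	}
	if !dropsAll || sc.SeccompProfile == nil {
		return false
	}
	t := sc.SeccompProfile.Type
	return t == corev1.SeccompProfileTypeRuntimeDefault || t == corev1.SeccompProfileTypeLocalhost
}

func main() {
	sc := &corev1.SecurityContext{
		RunAsNonRoot:             pointer.Bool(true),
		AllowPrivilegeEscalation: pointer.Bool(false),
		Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
		SeccompProfile:           &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
	}
	fmt.Println(meetsRestricted(sc)) // true
}
```

This also explains the third test case above: adding `ALL` capabilities without dropping them would not satisfy the standard, which is why the profile still applies the remaining mutations.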
diff --git a/test/cmd/debug.sh b/test/cmd/debug.sh
index f53b17a0b9dd..0e6507c3bb38 100755
--- a/test/cmd/debug.sh
+++ b/test/cmd/debug.sh
@@ -280,3 +280,174 @@ run_kubectl_debug_baseline_node_tests() {
   set +o nounset
   set +o errexit
 }
+
+run_kubectl_debug_restricted_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile restricted"
+
+  ### Pod Troubleshooting by ephemeral containers with restricted profile
+
+  # Pre-Condition: Pod "target" is created with the nginx image
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works even in a namespace that is not restricted
+  # Command: add a new debug container with restricted profile
+  output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted "${kube_flags[@]:?}")
+  kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
+  # Post-Conditions
+  kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  # Clean up
+  kubectl delete pod target "${kube_flags[@]:?}"
+
+  # Pre-Condition: Pod "target" is created with the nginx image
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works even in a namespace that is not restricted
+  # Command: create a copy of target with a new debug container
+  kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
+  # Clean up
+  kubectl delete pod target target-copy "${kube_flags[@]:?}"
+
+  ns_name="namespace-restricted"
+  # Command: create namespace and add a label
+  kubectl create namespace "${ns_name}"
+  kubectl label namespace "${ns_name}" pod-security.kubernetes.io/enforce=restricted
+  output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
+  kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'
+
+  # Pre-Condition: Pod "target" is created that complies with the restricted policy
+  kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works when the pod's seccompProfile is RuntimeDefault
+  # Command: add a new debug container with restricted profile
+  output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}")
+  kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
+  # Post-Conditions
+  kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  # Clean up
+  kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"
+
+  # Pre-Condition: Pod "target" is created that complies with the restricted policy
+  kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works when the pod's seccompProfile is RuntimeDefault
+  # Command: create a copy of target with a new debug container
+  kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
+  # Clean up
+  kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"
+
+  # Pre-Condition: Pod "target" is created that complies with the restricted policy
+  kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works when the pod's seccompProfile is Localhost
+  # Command: add a new debug container with restricted profile
+  output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}")
+  kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
+  # Post-Conditions
+  kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  # Clean up
+  kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"
+
+  # Pre-Condition: Pod "target" is created that complies with the restricted policy
+  kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Restricted profile works when the pod's seccompProfile is Localhost
+  # Command: create a copy of target with a new debug container
+  kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
+  # Clean up
+  kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"
+
+  # Clean up restricted namespace
+  kubectl delete namespace "${ns_name}"
+
+  set +o nounset
+  set +o errexit
+}
"${kube_flags[@]:?}") + kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity' + # Post-Conditions + kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:' + # Clean up + kubectl delete pod target -n ${ns_name} "${kube_flags[@]:?}" + + # Pre-Condition: Pod "nginx" is created + kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}" + kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:' + # Restricted profile works when pod's seccompProfile is Localhost + # Command: create a copy of target with a new debug container + kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n ${ns_name} "${kube_flags[@]:?}" + # Post-Conditions + kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:' + kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:' + kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:" + # Clean up + kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}" + + # Clean up restricted namespace + kubectl delete namespace "${ns_name}" + + set +o nounset + set +o errexit +} + +run_kubectl_debug_restricted_node_tests() { + set -o nounset + set -o errexit + + create_and_use_new_namespace + kube::log::status "Testing kubectl debug profile restricted (node)" + + ### Debug node with restrected profile + + # Pre-Condition: node exists + kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:' + # Restricted profile just works in not restricted namespace + # Command: create a new node debugger pod + output_message=$(kubectl debug --profile restricted node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true) + kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity' + # Post-Conditions + kube::test::get_object_assert pod "{{(len .items)}}" '1' + debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}") + kube::test::if_has_string "${output_message:?}" "${debugger:?}" + kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostIPC}}' '' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' '' + kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' '' + kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "allowPrivilegeEscalation"}}' 'false' + kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "capabilities" "drop"}}' '\[ALL\]' + kube::test::get_object_assert "pod/${debugger:?}" '{{if (index (index .spec.containers 0) "securityContext" "capabilities" "add") }}:{{end}}' '' + kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "runAsNonRoot"}}' 'true' + kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "seccompProfile" "type"}}' 'RuntimeDefault' + # Clean up + # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang, + # 
diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh
index 38d6e0a4c402..685fe415b923 100755
--- a/test/cmd/legacy-script.sh
+++ b/test/cmd/legacy-script.sh
@@ -1013,11 +1013,13 @@ runTests() {
     record_command run_kubectl_debug_pod_tests
     record_command run_kubectl_debug_general_tests
     record_command run_kubectl_debug_baseline_tests
+    record_command run_kubectl_debug_restricted_tests
   fi
 
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_kubectl_debug_node_tests
     record_command run_kubectl_debug_general_node_tests
     record_command run_kubectl_debug_baseline_node_tests
+    record_command run_kubectl_debug_restricted_node_tests
   fi
   cleanup_tests