Fix: Make restricted profile comply with PSS #117543

Merged (5 commits) on May 24, 2023
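In short: the kubectl debug restricted profile now also sets allowPrivilegeEscalation: false and a RuntimeDefault seccomp profile, so debug containers and node debugger pods pass the Pod Security Standards (PSS) restricted policy. A minimal sketch of the security context the profile is expected to produce, assembled as a Go literal from the test expectations below (the package and helper names are invented for illustration):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

// restrictedSecurityContext mirrors what the tests in this PR expect
// the restricted profile to apply to a debug container.
func restrictedSecurityContext() *corev1.SecurityContext {
	return &corev1.SecurityContext{
		RunAsNonRoot: pointer.Bool(true),
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
		},
		AllowPrivilegeEscalation: pointer.Bool(false),
		SeccompProfile:           &corev1.SeccompProfile{Type: "RuntimeDefault"},
	}
}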
23 changes: 23 additions & 0 deletions hack/testdata/pod-restricted-localhost.yaml
@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: target
  name: target
spec:
  securityContext:
    seccompProfile:
      type: Localhost
      localhostProfile: dummy.json
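      # PodSecurity admission validates only the profile type; dummy.json need not actually exist on the node.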
  containers:
  - image: busybox
    name: target
    command: ["/bin/sh", "-c", "sleep 100"]
    securityContext:
      runAsUser: 1000
      runAsGroup: 1000
      runAsNonRoot: true
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - "ALL"
22 changes: 22 additions & 0 deletions hack/testdata/pod-restricted-runtime-default.yaml
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: target
  name: target
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  containers:
  - image: busybox
    name: target
    command: ["/bin/sh", "-c", "sleep 100"]
    securityContext:
      runAsUser: 1000
      runAsGroup: 1000
      runAsNonRoot: true
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - "ALL"
8 changes: 7 additions & 1 deletion staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
@@ -289,6 +289,8 @@ func TestGenerateDebugContainer(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -1274,10 +1276,12 @@ func TestGeneratePodCopyWithDebugContainer(t *testing.T) {
Image: "busybox",
ImagePullPolicy: corev1.PullIfNotPresent,
SecurityContext: &corev1.SecurityContext{
RunAsNonRoot: pointer.Bool(true),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
RunAsNonRoot: pointer.Bool(true),
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -1646,6 +1650,8 @@ func TestGenerateNodeDebugPod(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
30 changes: 30 additions & 0 deletions staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
@@ -176,6 +176,8 @@ func (p *restrictedProfile) Apply(pod *corev1.Pod, containerName string, target
	clearSecurityContext(pod, containerName)
	disallowRoot(pod, containerName)
	dropCapabilities(pod, containerName)
	disallowPrivilegeEscalation(pod, containerName)
	setSeccompProfile(pod, containerName)

	switch style {
	case podCopy:
@@ -343,3 +345,31 @@ func addCapability(c *corev1.Container, capability corev1.Capability) {
	}
	c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, capability)
}

// disallowPrivilegeEscalation sets allowPrivilegeEscalation to false on the named container
func disallowPrivilegeEscalation(p *corev1.Pod, containerName string) {
	podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
		if c.Name != containerName {
			return true
		}
		if c.SecurityContext == nil {
			c.SecurityContext = &corev1.SecurityContext{}
		}
		c.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false)
		return false
	})
}

// setSeccompProfile sets the RuntimeDefault seccomp profile on the named container
func setSeccompProfile(p *corev1.Pod, containerName string) {
	podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
		if c.Name != containerName {
			return true
		}
		if c.SecurityContext == nil {
			c.SecurityContext = &corev1.SecurityContext{}
		}
		c.SecurityContext.SeccompProfile = &corev1.SeccompProfile{Type: "RuntimeDefault"}
		return false
	})
}
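Both new helpers share the visit-and-mutate pattern of the existing helpers above. As a hedged refactoring sketch (the package name and mutateContainer are invented here, not part of this PR; import paths follow those used in profiles.go), the common shape is:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/util/podutils"
)

// mutateContainer visits every container in the pod (init, regular,
// ephemeral), ensures the named one has a SecurityContext, applies
// mutate to it, and stops visiting once it has been found.
func mutateContainer(p *corev1.Pod, containerName string, mutate func(*corev1.Container)) {
	podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
		if c.Name != containerName {
			return true // not the target container; keep visiting
		}
		if c.SecurityContext == nil {
			c.SecurityContext = &corev1.SecurityContext{}
		}
		mutate(c)
		return false // target found and mutated; stop visiting
	})
}

With such a helper, disallowPrivilegeEscalation and setSeccompProfile would each shrink to a one-line call; the PR keeps them as separate functions, matching the style of the surrounding helpers.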
8 changes: 8 additions & 0 deletions staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
@@ -347,6 +347,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -386,6 +388,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -404,6 +408,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -423,6 +429,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
171 changes: 171 additions & 0 deletions test/cmd/debug.sh
@@ -280,3 +280,174 @@ run_kubectl_debug_baseline_node_tests() {
set +o nounset
set +o errexit
}

run_kubectl_debug_restricted_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing kubectl debug profile restricted"

### Pod troubleshooting using ephemeral containers with the restricted profile

# Pre-Condition: Pod "nginx" is created
kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# The restricted profile also works in a namespace without restricted enforcement
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target "${kube_flags[@]:?}"

# Pre-Condition: Pod "nginx" is created
kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# The restricted profile also works in a namespace without restricted enforcement
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
# Clean up
kubectl delete pod target target-copy "${kube_flags[@]:?}"

ns_name="namespace-restricted"
# Command: create namespace and add a label
kubectl create namespace "${ns_name}"
kubectl label namespace "${ns_name}" pod-security.kubernetes.io/enforce=restricted
output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'

# Pre-Condition: Pod "busybox" is created that complies with the restricted policy
kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when the pod's seccompProfile is RuntimeDefault
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"

# Pre-Condition: Pod "nginx" is created
kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when the pod's seccompProfile is RuntimeDefault
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
# Clean up
kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"

# Pre-Condition: Pod "busybox" is created that complies with the restricted policy
kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when the pod's seccompProfile is Localhost
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"

# Pre-Condition: Pod "nginx" is created
kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when the pod's seccompProfile is Localhost
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
# Clean up
kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"

# Clean up restricted namespace
kubectl delete namespace "${ns_name}"

set +o nounset
set +o errexit
}

run_kubectl_debug_restricted_node_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing kubectl debug profile restricted (node)"

### Debug node with restricted profile

# Pre-Condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
# The restricted profile also works in a namespace without restricted enforcement
# Command: create a new node debugger pod
output_message=$(kubectl debug --profile restricted node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true)
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert pod "{{(len .items)}}" '1'
debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
kube::test::if_has_string "${output_message:?}" "${debugger:?}"
kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostIPC}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "allowPrivilegeEscalation"}}' 'false'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "capabilities" "drop"}}' '\[ALL\]'
kube::test::get_object_assert "pod/${debugger:?}" '{{if (index (index .spec.containers 0) "securityContext" "capabilities" "add") }}:{{end}}' ''
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "runAsNonRoot"}}' 'true'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "seccompProfile" "type"}}' 'RuntimeDefault'
# Clean up
# pod.spec.nodeName is set by kubectl debug node, which causes the delete to hang,
# presumably waiting for a kubelet that is not present. Force the delete.
kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"

ns_name="namespace-restricted"
# Command: create namespace and add a label
kubectl create namespace "${ns_name}"
kubectl label namespace "${ns_name}" pod-security.kubernetes.io/enforce=restricted
output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'

# Pre-Condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
# Restricted profile works in the restricted namespace
# Command: create a new node debugger pod
output_message=$(kubectl debug --profile restricted node/127.0.0.1 --image=busybox --attach=false -n "${ns_name}" "${kube_flags[@]:?}" -- true)
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{(len .items)}}" '1'
debugger=$(kubectl get pod -n "${ns_name}" -o go-template="{{(index .items 0)${id_field:?}}}")
kube::test::if_has_string "${output_message:?}" "${debugger:?}"
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" "{{${image_field:?}}}" 'busybox'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.nodeName}}' '127.0.0.1'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostIPC}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostNetwork}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostPID}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "allowPrivilegeEscalation"}}' 'false'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "capabilities" "drop"}}' '\[ALL\]'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{if (index (index .spec.containers 0) "securityContext" "capabilities" "add") }}:{{end}}' ''
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "runAsNonRoot"}}' 'true'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "seccompProfile" "type"}}' 'RuntimeDefault'
# Clean up
# pod.spec.nodeName is set by kubectl debug node, which causes the delete to hang,
# presumably waiting for a kubelet that is not present. Force the delete.
kubectl delete --force pod "${debugger:?}" -n "${ns_name}" "${kube_flags[@]:?}"

# Clean up restricted namespace
kubectl delete namespace "${ns_name}"

set +o nounset
set +o errexit
}
2 changes: 2 additions & 0 deletions test/cmd/legacy-script.sh
@@ -1013,11 +1013,13 @@ runTests() {
record_command run_kubectl_debug_pod_tests
record_command run_kubectl_debug_general_tests
record_command run_kubectl_debug_baseline_tests
record_command run_kubectl_debug_restricted_tests
fi
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_kubectl_debug_node_tests
record_command run_kubectl_debug_general_node_tests
record_command run_kubectl_debug_baseline_node_tests
record_command run_kubectl_debug_restricted_node_tests
fi

cleanup_tests