From ae29d9ec88de685ca77024ee0b9038a3b564aeea Mon Sep 17 00:00:00 2001 From: Johannes Schnatterer Date: Tue, 24 Sep 2019 14:18:47 +0200 Subject: [PATCH] PSP: Adds additional good practices Same as in #2 for secCtx (even some more!) Closes #3. --- 4-pod-security-policies/Readme.md | 28 +++++++++----- ...strictive.yaml => 01-psp-restrictive.yaml} | 38 +++++++++++++++---- 2 files changed, 49 insertions(+), 17 deletions(-) rename 4-pod-security-policies/demo/{01-psp-more-restrictive.yaml => 01-psp-restrictive.yaml} (52%) diff --git a/4-pod-security-policies/Readme.md b/4-pod-security-policies/Readme.md index 7f694c7..63b0764 100644 --- a/4-pod-security-policies/Readme.md +++ b/4-pod-security-policies/Readme.md @@ -13,7 +13,12 @@ kubectl config set-context $(kubectl config current-context) --namespace=wild-we cd demo -# Remove privilege psp as default +# Make simple deployment +kubectl create deployment nginx --image nginx:1.17.2 --dry-run -o yaml | kubectl apply -f - +# It's running! +kubectl get pod $(kubectl get pod | awk '/^nginx/ {print $1;exit}') + +# Remove privileged PSP as default kubectl delete rolebinding default:psp:privileged kubectl delete pod --all kubectl get pod @@ -22,17 +27,22 @@ kubectl describe rs $(kubectl get rs | awk '/all-at-once/ {print $1;exit}') | g # Error creating: pods "nginx-read-only-fs-empty-dirs-f7676b7d8-" is forbidden: unable to validate against any pod security policy: [] # replicasets are no longer allowed to schedule pods -# Use the PSP that is more restrictive -cat 01-psp-more-restrictive.yaml -kubectl apply -f 01-psp-more-restrictive.yaml +# Use PSP that is more restrictive +cat 01-psp-restrictive.yaml +kubectl apply -f 01-psp-restrictive.yaml # Delete replica sets -> Deployments create new ones which adhere to new PSP kubectl delete rs --all watch kubectl get pods -# Most pods are failing - why? 
-kubectl get pod $(kubectl get pods | awk '/nginx/ {print $1;exit}') -o yaml --export | grep -A4 securityContext +# Pods that comply with PSP are now running, e.g. +kubectl get pod $(kubectl get pods | awk '/all-at-once/ {print $1;exit}') +# But most pods are failing - why? # The new ReplicaSets set the securityContext adhering to PSP -> e.g. original nginx image cannot run as uid 1 +kubectl describe pod $(kubectl get pods | awk '/^nginx/ {print $1;exit}') | grep Error + +### Best Option: Change deployment to adhere to PSP +cat ../../3-security-context/demo/13-deployment-all-at-once.yaml | grep -A8 securityContext -### One option: "Whitelist" pod to use privileged psp +### Less secure alternative: "Whitelist" pod to use less restrictive PSP cat 02a-psp-whitelist.yaml kubectl apply -f 02a-psp-whitelist.yaml # Use service account for nginx pod @@ -42,9 +52,9 @@ kubectl delete pod $(kubectl get pods | awk '/^nginx/ {print $1;exit}') # Now runs again kubectl get pod $(kubectl get pods | awk '/^nginx/ {print $1;exit}') -# statefulsets are also restricted by psp +# statefulsets are also restricted by PSP cat 03-statefulset.yaml -kubectl describe statefulset stateful | grep error +kubectl describe pod stateful-0 | grep error ``` diff --git a/4-pod-security-policies/demo/01-psp-more-restrictive.yaml b/4-pod-security-policies/demo/01-psp-restrictive.yaml similarity index 52% rename from 4-pod-security-policies/demo/01-psp-more-restrictive.yaml rename to 4-pod-security-policies/demo/01-psp-restrictive.yaml index add2d43..5a60827 100644 --- a/4-pod-security-policies/demo/01-psp-more-restrictive.yaml +++ b/4-pod-security-policies/demo/01-psp-restrictive.yaml @@ -3,26 +3,48 @@ apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + # Not setting this will result in blocking pods that have this profile set explicitly + 
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' + + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' spec: + allowedHostPaths: [] + hostIPC: false + hostNetwork: false + hostPID: false + hostPorts: [] + + requiredDropCapabilities: + - ALL + allowedCapabilities: [] + privileged: false + defaultAllowPrivilegeEscalation: false allowPrivilegeEscalation: false - # Otherwise configmaps, secrets, etc. are not allowed to be used + # Allow core volume types. But more specifically, don't allow mounting host volumes to include the Docker socket - '/var/run/docker.sock' + # https://kurtmadel.com/posts/native-kubernetes-continuous-delivery/building-container-images-with-kubernetes/ + # Without the "volumes" block configmaps, secrets, etc. are not allowed to be used volumes: - - '*' + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' runAsUser: rule: 'MustRunAsNonRoot' # Note that this also forces as numeric uid! - # ranges: 1..65535 seems to force set uid 1. supplementalGroups: rule: 'MustRunAs' ranges: - - min: 1 - max: 65535 + - min: 100000 + max: 999999 fsGroup: rule: 'MustRunAs' ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 + - min: 100000 + max: 999999 readOnlyRootFilesystem: true # Default that must be set