From 742325307e1c7fd0d81769525116a1ec0e8c3a10 Mon Sep 17 00:00:00 2001 From: Anuj Shamra Date: Sat, 1 Aug 2020 02:56:08 +0530 Subject: [PATCH 1/4] corrected config yaml data --- benchmarks/kubectl-mtb/docs/main.go | 32 ++++++++++++++++--- benchmarks/kubectl-mtb/go.sum | 6 ++-- .../kubectl-mtb/pkg/benchmark/benchmark.go | 2 ++ .../README.md | 32 ++++++++++++++++--- .../config.yaml | 10 ++++++ .../block_add_capabilities/README.md | 28 +++++++++++++--- .../block_add_capabilities/config.yaml | 6 ++-- .../test/benchmarks/block_ns_quota/README.md | 30 ++++++++++++++--- .../benchmarks/block_ns_quota/config.yaml | 12 ++++++- .../block_privilege_escalation/README.md | 28 +++++++++++++--- .../block_privilege_escalation/config.yaml | 4 ++- .../block_privileged_containers/README.md | 28 +++++++++++++--- .../block_privileged_containers/config.yaml | 6 ++-- .../block_use_of_bind_mounts/README.md | 28 +++++++++++++--- .../block_use_of_bind_mounts/config.yaml | 4 ++- .../block_use_of_host_ipc/README.md | 28 +++++++++++++--- .../block_use_of_host_ipc/config.yaml | 4 ++- .../README.md | 29 ++++++++++++++--- .../config.yaml | 6 +++- .../block_use_of_host_pid/README.md | 28 +++++++++++++--- .../block_use_of_host_pid/config.yaml | 4 ++- .../block_use_of_nodeport_services/README.md | 28 +++++++++++++--- .../config.yaml | 4 ++- .../configure_ns_object_quota/README.md | 32 ++++++++++++++++--- .../configure_ns_object_quota/config.yaml | 5 ++- .../benchmarks/configure_ns_quotas/README.md | 29 ++++++++++++++--- .../configure_ns_quotas/config.yaml | 4 +++ .../create_network_policies/README.md | 30 ++++++++++++++--- .../create_network_policies/config.yaml | 8 ++++- .../require_run_as_non_root_user/README.md | 28 +++++++++++++--- .../require_run_as_non_root_user/config.yaml | 6 ++-- 31 files changed, 435 insertions(+), 94 deletions(-) diff --git a/benchmarks/kubectl-mtb/docs/main.go b/benchmarks/kubectl-mtb/docs/main.go index b09857cb5..24ebfa84a 100644 --- a/benchmarks/kubectl-mtb/docs/main.go +++ b/benchmarks/kubectl-mtb/docs/main.go @@ -27,25 +27,47 @@ type Doc struct { Description string `yaml:"description"` Remediation string `yaml:"remediation"` ProfileLevel int `yaml:"profileLevel"` + Rationale string `yaml:"rationale"` + Audit string `yaml:"audit"` AdditionalField map[string]interface{} `yaml:"additionalFields"` } // README template const templ = `# {{.Title}} [{{.ID}}] -**Profile Applicability:** + +**Profile Applicability:** + {{.ProfileLevel}}
-**Type:** + +**Type:** + {{.BenchmarkType}}
-**Category:** + +**Category:** + {{.Category}}
-**Description:** + +**Description:** + {{.Description}}
-**Remediation:** + +**Rationale:** + +{{.Rationale}}
+ +**Audit:** + +{{.Audit}}
+ {{.Remediation}}
+ {{ range $key, $value := .AdditionalField }} **{{ $key }}:** + {{ $value }}
+ {{ end }} + ` func exists(path string) (bool, error) { diff --git a/benchmarks/kubectl-mtb/go.sum b/benchmarks/kubectl-mtb/go.sum index 81a74b04f..dad9998aa 100644 --- a/benchmarks/kubectl-mtb/go.sum +++ b/benchmarks/kubectl-mtb/go.sum @@ -1487,14 +1487,16 @@ sigs.k8s.io/multi-tenancy v0.0.0-20200710135948-2d1071532987 h1:tJTZBwaG1ryvKEM9 sigs.k8s.io/multi-tenancy v0.0.0-20200710152148-20515322b4e5 h1:zNaPpPazyROkh3h19JUmxlcfTUqFQ7ZxlX/g1p0jl0A= sigs.k8s.io/multi-tenancy v0.0.0-20200713220920-829ca66edf83 h1:Wu4A0FA9gXUxB+BOb/LzqU8S8EUdZE92s0YU5WXtLcM= sigs.k8s.io/multi-tenancy v0.0.0-20200714035720-9254d886f1e8 h1:2jvDW9Ut25bjFIsVPj66RNohUl+e3xmlXeKtDM3XLkg= -sigs.k8s.io/multi-tenancy v0.0.0-20200726013016-97a38fedf0b1 h1:y8ONNC+S0jkxria4hGCq+HXVxbSXK72kKkyR4WK2vfw= sigs.k8s.io/multi-tenancy v0.0.0-20200724204617-6364dbba69da h1:HOR9N89EJFKSyQmD3/x36+FxkYIGMEg/8a4N+IiCUG8= +sigs.k8s.io/multi-tenancy v0.0.0-20200726013016-97a38fedf0b1 h1:y8ONNC+S0jkxria4hGCq+HXVxbSXK72kKkyR4WK2vfw= +sigs.k8s.io/multi-tenancy v0.0.0-20200731200539-a59bb770c223 h1:3LVSAKupidlml1n/MbHPbJr5m48kpsOKFVAcsOhXgN0= sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200707060558-ea14282f3be6 h1:V4K5fPHAgNnYTFmhKlU4cp03o7/nuZbbVqFnEHvcyHk= sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200710152148-20515322b4e5 h1:h21E7xB6JQ19Hy5ypObM90L4xScjwiNQxrOACXJ409w= sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200713220920-829ca66edf83 h1:nmcpLotBZVRnlvDDd3q9b2J9VuW2rfkCRBl+1x/0rfk= sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200714035720-9254d886f1e8 h1:tLrFy2wLP0LJSQORg9FslngBnoADSEn+uYju2W3eOjk= -sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200726013016-97a38fedf0b1 h1:6shszoTBt41BnJeg6gGyF5phNzfX0CMKJy/Mn71Oz/M= sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200724204617-6364dbba69da h1:sZgkCMXKgOF4Diom1+CeyUNmrtu+9BPV+CV3nMzAfJM= +sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200726013016-97a38fedf0b1 h1:6shszoTBt41BnJeg6gGyF5phNzfX0CMKJy/Mn71Oz/M= +sigs.k8s.io/multi-tenancy/benchmarks v0.0.0-20200731200539-a59bb770c223 h1:fVp4SgTf3sYqwf85QT5CFDblbEqmDgF0pJhHyFRjkCU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go b/benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go index 281e4bc3b..14b8a6012 100644 --- a/benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go +++ b/benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go @@ -16,6 +16,8 @@ type Benchmark struct { Description string `yaml:"description"` Remediation string `yaml:"remediation"` ProfileLevel int `yaml:"profileLevel"` + Rationale string `yaml:"rationale"` + Audit string `yaml:"audit"` PreRun func(string, *kubernetes.Clientset, *kubernetes.Clientset) error Run func(string, *kubernetes.Clientset, *kubernetes.Clientset) error } diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md index 1fa0c0de9..03b18bb43 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md @@ -1,12 +1,34 @@ # Block access to cluster resources [MTB-PL1-CC-CPI-1] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Configuration Check
-**Category:** + +**Category:** + Control Plane Isolation
-**Description:** + +**Description:** + Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc.
-**Remediation:** + +**Rationale:** + +Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources
+ +**Audit:** + +Run the following commands to retrieve the list of non-namespaced resources +kubectl --kubeconfig cluster-admin api-resources --namespaced=false +For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands: +kubectl --kubeconfig tenant-a auth can-i <verb> <resource> +Each command must return 'no'
+
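For illustration, one arrangement consistent with this check (the `tenant-a` user, the `a1` namespace, and the binding name are placeholders) grants tenant permissions only through a namespaced RoleBinding, with no ClusterRoleBinding for the tenant, so the commands above return 'no':

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tenant-a-admin          # illustrative name
  namespace: a1                 # permissions are granted only inside the tenant namespace
subjects:
- kind: User
  name: tenant-a                # the tenant user exercised by the audit commands
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: admin                   # built-in role, but bound per-namespace here
  apiGroup: rbac.authorization.k8s.io
```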
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml index 91186251a..9053d6391 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml @@ -5,3 +5,13 @@ category: Control Plane Isolation description: Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such Node, ClusterRole, ClusterRoleBinding, etc. remediation: profileLevel: 1 +rationale: Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources +audit: "Run the following commands to retrieve the list of non-namespaced resources + + kubectl --kubeconfig cluster-admin api-resources --namespaced=false + +For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands: + + kubectl --kubeconfig tenant-a auth can-i + +Each command must return 'no'" diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md index ec5924991..b9570b773 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md @@ -1,12 +1,30 @@ # Block add capabilities [MTB-PL1-BC-CPI-3] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Control Plane Isolation
-**Description:** + +**Description:** + +Linux
+ +**Rationale:** + Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.
-**Remediation:** + +**Audit:** + +Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail.
+ Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
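For illustration, a pod along these lines (name, namespace `a1`, and image are placeholders) is the kind of object the audit creates; with the policy in place the API server should reject it:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: added-capabilities-test   # illustrative
  namespace: a1
spec:
  containers:
  - name: test
    image: nginx                  # any image works for this check
    securityContext:
      capabilities:
        add: ["NET_ADMIN"]        # requesting an extra capability should be blocked
```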
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/config.yaml index 12128e81f..13c56c402 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/config.yaml @@ -2,6 +2,8 @@ id: MTB-PL1-BC-CPI-3 title: Block add capabilities benchmarkType: Behavioral Check category: Control Plane Isolation -description: Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors. +description: Linux remediation: Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +rationale: Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors. +profileLevel: 1 +audit: Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md index eb2936a36..2d4385ee8 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md @@ -1,12 +1,32 @@ # Block modification of resource quotas [MTB-PL1-CC-TI-1] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Tenant Isolation
-**Description:** + +**Description:** + Tenants should not be able to modify the resource quotas defined in their namespaces
-**Remediation:** + +**Rationale:** + +Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.
+ +**Audit:** + +Run the following commands to check for permissions to manage quotas in the tenant namespace: +kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota +Each command must return 'no'
+
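A sketch of a tenant Role consistent with this benchmark (name and resource list are illustrative): it allows everyday operations but grants no write verbs on `resourcequotas`, so each `auth can-i` check above returns 'no':

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tenant-editor             # illustrative
  namespace: a1
rules:
- apiGroups: [""]
  resources: ["pods", "services", "configmaps"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
  resources: ["resourcequotas"]
  verbs: ["get", "list"]          # read-only; create/update/patch/delete are deliberately omitted
```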
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml index 3d47cb342..424c63ef8 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml @@ -4,4 +4,14 @@ benchmarkType: Behavioral Check category: Tenant Isolation description: Tenants should not be able to modify the resource quotas defined in their namespaces remediation: -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +rationale: Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants. +audit: "Run the following commands to check for permissions to manage quotas in the tenant namespace: + + kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota + +Each command must return 'no'" \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md index a85e19081..0cda61a58 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md @@ -1,12 +1,30 @@ # Block privilege escalation [MTB-PL1-BC-CPI-6] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Control Plane Isolation
-**Description:** + +**Description:** + The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
-**Remediation:** + +**Rationale:** + +The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
+ +**Audit:** + +Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail.
+ Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
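As a sketch of the audit step (names and image are placeholders), a pod such as the following should be rejected:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: privilege-escalation-test   # illustrative
  namespace: a1
spec:
  containers:
  - name: test
    image: nginx                    # placeholder image
    securityContext:
      allowPrivilegeEscalation: true   # the policy should deny this
```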
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/config.yaml index 7b0305169..724d5a1b8 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/config.yaml @@ -4,4 +4,6 @@ benchmarkType: Behavioral Check category: Control Plane Isolation description: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional priviliges. remediation: Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +audit: Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail. +rationale: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional priviliges. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md index 153a67213..829ca39b8 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md @@ -1,12 +1,30 @@ # Block privileged containers [MTB-PL1-BC-CPI-5] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Control Plane Isolation
-**Description:** + +**Description:** + +Linux
+ +**Rationale:** + By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.
-**Remediation:** + +**Audit:** + +Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail.
+ Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
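For example, a pod like the following (illustrative name, namespace, and image) exercises this check and should be denied:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: privileged-test    # illustrative
  namespace: a1
spec:
  containers:
  - name: test
    image: nginx           # placeholder image
    securityContext:
      privileged: true     # the policy should deny privileged containers
```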
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/config.yaml index 8a22a8d2c..3a6d28902 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/config.yaml @@ -2,6 +2,8 @@ id: MTB-PL1-BC-CPI-5 title: Block privileged containers benchmarkType: Behavioral Check category: Control Plane Isolation -description: By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers. +rationale: By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers. remediation: Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +description: Linux +audit: Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md index 2748771a8..c451ff786 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md @@ -1,12 +1,30 @@ # Block use of bind mounts [MTB-PL1-BC-HI-1] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Host Protection
-**Description:** + +**Description:** + Tenants should not be able to mount host volumes and folders (bind mounts).
-**Remediation:** + +**Rationale:** + +The use of host volumes and directories can be used to access shared data or escalate privileges and also creates a tight coupling between a tenant workload and a host.
+ +**Audit:** + +Create a pod defining a volume of type hostpath. The pod creation must fail.
+ Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
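A minimal sketch of the audit pod (host path, names, and image are placeholders); creation should fail because of the `hostPath` volume:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-test      # illustrative
  namespace: a1
spec:
  containers:
  - name: test
    image: nginx           # placeholder image
    volumeMounts:
    - name: host-dir
      mountPath: /host     # arbitrary mount point inside the container
  volumes:
  - name: host-dir
    hostPath:
      path: /var/log       # any host directory; the hostPath volume itself is what gets blocked
```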
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/config.yaml index 37208c163..bbca9251e 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/config.yaml @@ -4,4 +4,6 @@ benchmarkType: Behavioral Check category: Host Protection description: Tenants should not be able to mount host volumes and folders (bind mounts). remediation: Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +rationale: The use of host volumes and directories can be used to access shared data or escalate priviliges and also creates a tight coupling between a tenant workload and a host. +audit: Create a pod defining a volume of type hostpath. The pod creation must fail. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md index 5d3a8fb75..4626d4793 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md @@ -1,12 +1,30 @@ # Block use of host IPC [MTB-PL1-BC-HI-5] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Host Isolation
-**Description:** + +**Description:** + Tenants should not be allowed to share the host's inter-process communication (IPC) namespace.
-**Remediation:** + +**Rationale:** + +The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants.
+ +**Audit:** + +Create a pod or container that sets `hostIPC` to `true`. The pod creation must fail.
+ Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostIPC` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
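For illustration (names and image are placeholders), the audit pod looks like this and should be rejected:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: host-ipc-test      # illustrative
  namespace: a1
spec:
  hostIPC: true            # sharing the host IPC namespace should be denied
  containers:
  - name: test
    image: nginx           # placeholder image
```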
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/config.yaml index bd21c5062..99d80681b 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/config.yaml @@ -4,4 +4,6 @@ benchmarkType: Behavioral Check category: Host Isolation description: Tenants should not be allowed to share the host's inter-process communication (IPC) namespace. remediation: Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +audit: Create a pod or container that sets new `hostIPC` to `true`. The pod creation must fail. +rationale: The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md index 8bcaadc03..52181d61a 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md @@ -1,12 +1,31 @@ # Block use of host networking and ports [MTB-PL1-BC-HI-3] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Host Isolation
-**Description:** + +**Description:** + Tenants should not be allowed to use host networking and host ports for their workloads.
-**Remediation:** + +**Rationale:** + +Using `hostPort` and `hostNetwork` allows tenant workloads to share the host networking stack, allowing potential snooping of network traffic across application pods.
+ +**Audit:** + +Create a pod defining a container using a host port. The pod creation must fail. +Create a pod defining a container using a host network. The pod creation must fail.
+
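A sketch of the pods the audit asks for, combined into one manifest for brevity (names, port numbers, and image are placeholders); either setting alone should cause rejection:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: host-network-test  # illustrative
  namespace: a1
spec:
  hostNetwork: true        # host networking should be denied
  containers:
  - name: test
    image: nginx           # placeholder image
    ports:
    - containerPort: 80
      hostPort: 8080       # a hostPort on its own should also be denied
```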
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml index 317a5d78b..adcd4dbbf 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml @@ -4,4 +4,8 @@ benchmarkType: Behavioral Check category: Host Isolation description: Tenants should not be allowed to use host networking and host ports for their workloads. remediation: -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +rationale: Using `hostPort` and `hostNetwork` allows tenants workloads to share the host networking stack allowing potential snooping of network traffic across application pods +audit: "Create a pod defining a container using a host port. The pod creation must fail. + +Create a pod defining a container using a host network. The pod creation must fail." \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md index a1a8cb7d4..9cf9326cc 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md @@ -1,12 +1,30 @@ # Block use of host PID [MTB-PL1-BC-HI-4] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Host Isolation
-**Description:** + +**Description:** + Tenants should not be allowed to share the host process ID (PID) namespace.
-**Remediation:** + +**Rationale:** + +The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace.
+ +**Audit:** + +Create a pod or container that sets `hostPID` to `true`. The pod creation must fail.
+ Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
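For example (illustrative names and image), the audit pod is simply:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: host-pid-test      # illustrative
  namespace: a1
spec:
  hostPID: true            # sharing the host PID namespace should be denied
  containers:
  - name: test
    image: nginx           # placeholder image
```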
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/config.yaml index c50425f94..e111d14d0 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/config.yaml @@ -4,4 +4,6 @@ benchmarkType: Behavioral Check category: Host Isolation description: Tenants should not be allowed to share the host process ID (PID) namespace. remediation: Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +audit: Create a pod or container that sets new `hostPID` to `true`. The pod creation must fail. +rationale: The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md index 3ae80cc41..80f242a3b 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md @@ -1,12 +1,30 @@ # Block use of NodePort services [MTB-PL1-BC-HI-1] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Host Isolation
-**Description:** + +**Description:** + Tenants should not be able to create services of type NodePort.
-**Remediation:** + +**Rationale:** + +NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers.
+ +**Audit:** + +Create a deployment and an associated service exposing a NodePort. The service creation must fail.
+ Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
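As an illustration of the audit step (service name, selector, and ports are placeholders), a NodePort Service such as the following should be rejected:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: nodeport-test      # illustrative
  namespace: a1
spec:
  type: NodePort           # creating this service type should be denied
  selector:
    app: web               # matches whatever deployment the audit creates
  ports:
  - port: 80
    targetPort: 80
```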
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/config.yaml index 5ea6fa6f2..b9009cf07 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/config.yaml @@ -4,4 +4,6 @@ benchmarkType: Behavioral Check category: Host Isolation description: Tenants should not be able to create services of type NodePort. remediation: Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +audit: Create a deployment and an associated service exposing a NodePort. The service creation must fail. +rationale: NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md index ea2fa0df1..1ecdb46a9 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md @@ -1,19 +1,41 @@ # Configure namespace object limits [MTB-PL1-CC-FNS-2] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Configuration
-**Category:** + +**Category:** + Fairness
-**Description:** + +**Description:** + Namespace resource quotas should be used to allocate, track and limit the number of objects, of a particular type, that can be created within a namespace.
-**Remediation:** + +**Rationale:** + +
+ +**Audit:** + +
+ Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
+ **Audit:** + Run the following command to show configured quotas. Make sure that a quota is configured for API objects (PersistentVolumeClaim, LoadBalancer, NodePort, Pods, etc.). kubectl --kubeconfig=tenant-a -n a1 describe resourcequota
+ **Rationale:** + Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants.
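A minimal sketch of such a quota (the name and limits are illustrative; `test/quotas/ns_quota.yaml` plays this role in the benchmark suite):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: object-quota             # illustrative
  namespace: a1
spec:
  hard:
    pods: "20"
    persistentvolumeclaims: "5"
    services.nodeports: "0"      # object-count quotas can also forbid a type entirely
    services.loadbalancers: "0"
```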
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml index e3c755c32..b7679b716 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml @@ -6,7 +6,6 @@ description: Namespace resource quotas should be used to allocate, track and lim remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` profileLevel: 1 Rationale: Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants. -Audit: - Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc). +Audit: "Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc). - kubectl --kubeconfig=tenant-a -n a1 describe resourcequota + kubectl --kubeconfig=tenant-a -n a1 describe resourcequota" diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md index f3f9eab60..60a593b9f 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md @@ -1,12 +1,31 @@ # Configure namespace resource quotas [MTB-PL1-CC-FNS-1] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Configuration
-**Category:** + +**Category:** + Fairness
-**Description:** + +**Description:** + Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources.
-**Remediation:** + +**Rationale:** + +Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants.
+ +**Audit:** + +Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources. +kubectl --kubeconfig=tenant-a -n a1 describe quota
+ Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
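A sketch of a compute-resource quota covering the CPU, memory, and storage dimensions mentioned above (name and values are illustrative):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota      # illustrative
  namespace: a1
spec:
  hard:
    requests.cpu: "2"
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi
    requests.storage: 50Gi
```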
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml index 5cda52eda..22c0c79ff 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml @@ -5,3 +5,7 @@ category: Fairness description: Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources. remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` profileLevel: 1 +audit: "Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources. + + kubectl --kubeconfig=tenant-a -n a1 describe quota" +rationale: Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md index 57f10aadf..364a77119 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md @@ -1,12 +1,32 @@ # Create Network Policies [MTB-PL2-BC-OPS-4] -**Profile Applicability:** + +**Profile Applicability:** + 2
-**Type:** + +**Type:** + Behavioral
-**Category:** + +**Category:** + Self-Service Operations
-**Description:** + +**Description:** + Tenants should be able to perform self-service operations by creating their own network policies in their namespaces.
-**Remediation:** + +**Rationale:** + +Enables self-service management of network-policies.
+ +**Audit:** + +Run the following commands to check for permissions to manage `network-policy` for each verb(get, create, update, patch, delete, and deletecollection) in the tenant namespace: +kubectl --kubeconfig=tenant-a -n a1 auth can-i <verb> networkpolicy +Each command must return 'yes'
+
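For illustration, a policy a tenant might create once these permissions are granted (name and rule are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-same-namespace   # illustrative
  namespace: a1
spec:
  podSelector: {}              # applies to every pod in the tenant namespace
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}          # only pods from the same namespace may connect
```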
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml index 5230465e7..d3f9fee82 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml @@ -4,4 +4,10 @@ benchmarkType: Behavioral category: Self-Service Operations description: Tenants should be able to perform self-service operations by creating own network policies in their namespaces. remediation: -profileLevel: 2 \ No newline at end of file +profileLevel: 2 +audit: "Run the following commands to check for permissions to manage `network-policy` for each verb(get, create, update, patch, delete, and deletecollection) in the tenant namespace: + + kubectl --kubeconfig=tenant-a -n a1 auth can-i networkpolicy + +Each command must return 'yes'" +rationale: Enables self-service management of network-policies. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md index 577e92a26..218b93a49 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md @@ -1,12 +1,30 @@ # Require run as non-root user [MTB-PL1-BC-CPI-4] -**Profile Applicability:** + +**Profile Applicability:** + 1
-**Type:** + +**Type:** + Behavioral Check
-**Category:** + +**Category:** + Control Plane Isolation
-**Description:** + +**Description:** + +Linux
+ +**Rationale:** + Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non root users.
-**Remediation:** + +**Audit:** + +Create a pod or container that does not set `runAsNonRoot` to `true` in its `securityContext`. The pod creation must fail for both cases.
+ Define a PodSecurityPolicy with a runAsUser rule set to MustRunAsNonRoot and map the policy to each tenant's namespace, or use a policy engine such as OPA/Gatekeeper or Kyverno to enforce that runAsNonRoot is set to true for tenant pods. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
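A compliant pod would carry a securityContext along these lines (name, UID, and image are placeholders; the image itself must also be able to run as a non-root user):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nonroot-test       # illustrative
  namespace: a1
spec:
  securityContext:
    runAsNonRoot: true     # required by the policy
    runAsUser: 1000        # any non-zero UID
  containers:
  - name: test
    image: nginx           # placeholder; choose an image that supports non-root execution
```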
+ + diff --git a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/config.yaml index 9d77d4f54..9e7a4c061 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/config.yaml @@ -2,6 +2,8 @@ id: MTB-PL1-BC-CPI-4 title: Require run as non-root user benchmarkType: Behavioral Check category: Control Plane Isolation -description: Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non root users. +rationale: Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non root users. remediation: Define a PodSecurityPolicy a runAsUser rule set to MustRunAsNonRoot and map the policy to each tenant's namespace, or use a policy engine such as OPA/Gatekeeper or Kyverno to enforce that runAsNonRoot is set to true for tenant pods. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). -profileLevel: 1 \ No newline at end of file +profileLevel: 1 +audit: Create a pod or container that does not set `runAsNonRoot` to `true` in its `securityContext`. The pod creation must fail for both cases. +description: Linux \ No newline at end of file From cdd2539f5aa742a427eeade40225da41e30a3bb5 Mon Sep 17 00:00:00 2001 From: Anuj Shamra Date: Sun, 2 Aug 2020 13:02:44 +0530 Subject: [PATCH 2/4] corrected readme and congigs --- benchmarks/kubectl-mtb/docs/main.go | 56 +++++++++++-------- .../README.md | 21 ++++--- .../config.yaml | 19 ++++--- .../block_add_capabilities/README.md | 16 +++--- .../test/benchmarks/block_ns_quota/README.md | 22 +++++--- .../benchmarks/block_ns_quota/config.yaml | 19 ++++--- .../block_privilege_escalation/README.md | 16 +++--- .../block_privileged_containers/README.md | 16 +++--- .../block_use_of_bind_mounts/README.md | 16 +++--- .../block_use_of_host_ipc/README.md | 16 +++--- .../README.md | 13 ++--- .../config.yaml | 5 +- .../block_use_of_host_pid/README.md | 16 +++--- .../block_use_of_nodeport_services/README.md | 16 +++--- .../configure_ns_object_quota/README.md | 23 ++++---- .../configure_ns_object_quota/config.yaml | 8 ++- .../benchmarks/configure_ns_quotas/README.md | 17 +++--- .../configure_ns_quotas/config.yaml | 8 ++- .../create_network_policies/README.md | 17 +++--- .../create_network_policies/config.yaml | 11 ++-- .../require_run_as_non_root_user/README.md | 16 +++--- 21 files changed, 190 insertions(+), 177 deletions(-) diff --git a/benchmarks/kubectl-mtb/docs/main.go b/benchmarks/kubectl-mtb/docs/main.go index 24ebfa84a..f54a1ec68 100644 --- a/benchmarks/kubectl-mtb/docs/main.go +++ b/benchmarks/kubectl-mtb/docs/main.go @@ -32,43 +32,43 @@ type Doc struct { AdditionalField map[string]interface{} `yaml:"additionalFields"` } -// README template -const templ = `# {{.Title}} [{{.ID}}] +func ReadmeTemplate() []byte { + return []byte( + `# {{.Title}} [{{.ID}}] **Profile Applicability:** -{{.ProfileLevel}}
+{{.ProfileLevel}} **Type:** -{{.BenchmarkType}}
+{{.BenchmarkType}} **Category:** -{{.Category}}
+{{.Category}} **Description:** -{{.Description}}
+{{.Description}} **Rationale:** -{{.Rationale}}
+{{.Rationale}} **Audit:** -{{.Audit}}
+{{.Audit}} -{{.Remediation}}
+{{.Remediation}} {{ range $key, $value := .AdditionalField }} **{{ $key }}:** -{{ $value }}
+{{ $value }} -{{ end }} - -` +{{ end }}`) +} func exists(path string) (bool, error) { _, err := os.Stat(path) @@ -132,8 +132,8 @@ func main() { for _, i := range values { deleteFields(i, d.AdditionalField) } - t := template.New("README template") - t, err = t.Parse(templ) + // t := template.New("README template") + // t, err = t.Parse(templ) // Get directory of the config file dirPath := getDirectory(path, "/") @@ -144,18 +144,30 @@ func main() { return err } - f, err := os.Create(dirPath + "/README.md") - if err != nil { - return err - } + // f, err := os.Create(dirPath + "/README.md") + // if err != nil { + // return err + // } + + // // Write the output to the README file + // err = t.Execute(f, d) + // if err != nil { + // return err + // } + + // err = f.Close() + // if err != nil { + // return err + // } - // Write the output to the README file - err = t.Execute(f, d) + mainFile, err := os.Create(fmt.Sprintf("%s/README.md", dirPath)) if err != nil { return err } + defer mainFile.Close() - err = f.Close() + mainTemplate := template.Must(template.New("main").Parse(string(ReadmeTemplate()))) + err = mainTemplate.Execute(mainFile, d) if err != nil { return err } diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md index 03b18bb43..60a511b54 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/README.md @@ -2,33 +2,36 @@ **Profile Applicability:** -1
+1 **Type:** -Configuration Check
+Configuration Check **Category:** -Control Plane Isolation
+Control Plane Isolation **Description:** -Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such Node, ClusterRole, ClusterRoleBinding, etc.
+Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc. **Rationale:** -Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources
+Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources **Audit:** Run the following commands to retrieve the list of non-namespaced resources +```bash kubectl --kubeconfig cluster-admin api-resources --namespaced=false -For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands: -kubectl --kubeconfig tenant-a auth can-i <verb> <resource> -Each command must return 'no'
+``` +For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands +```bash +kubectl --kubeconfig tenant-a auth can-i verb resource +``` +Each command must return 'no' -
diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml index 9053d6391..c0e65653a 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_access_to_cluster_resources/config.yaml @@ -6,12 +6,13 @@ description: Tenants should not be able to view, edit, create, or delete cluster remediation: profileLevel: 1 rationale: Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources -audit: "Run the following commands to retrieve the list of non-namespaced resources - - kubectl --kubeconfig cluster-admin api-resources --namespaced=false - -For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands: - - kubectl --kubeconfig tenant-a auth can-i - -Each command must return 'no'" +audit: | + Run the following commands to retrieve the list of non-namespaced resources + ```bash + kubectl --kubeconfig cluster-admin api-resources --namespaced=false + ``` + For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection) issue the following commands + ```bash + kubectl --kubeconfig tenant-a auth can-i verb resource + ``` + Each command must return 'no' diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md index b9570b773..7ddbb3c52 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_add_capabilities/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Control Plane Isolation
+Control Plane Isolation **Description:** -Linux
+Linux **Rationale:** -Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.
+Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors. **Audit:** -Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail.
- -Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail. +Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md index 2d4385ee8..9750ec524 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/README.md @@ -2,31 +2,35 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Tenant Isolation
+Tenant Isolation **Description:** -Tenants should not be able to modify the resource quotas defined in their namespaces
+Tenants should not be able to modify the resource quotas defined in their namespaces **Rationale:** -Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.
+Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants. **Audit:** Run the following commands to check for permissions to manage quotas in the tenant namespace: -kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota -Each command must return 'no'
- -
+```shell +kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota +kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota +kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota +kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota +kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota +``` +Each command must return 'no'" diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml index 424c63ef8..747188537 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_ns_quota/config.yaml @@ -6,12 +6,13 @@ description: Tenants should not be able to modify the resource quotas defined in remediation: profileLevel: 1 rationale: Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants. -audit: "Run the following commands to check for permissions to manage quotas in the tenant namespace: - - kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota - kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota - kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota - kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota - kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota - -Each command must return 'no'" \ No newline at end of file +audit: | + Run the following commands to check for permissions to manage quotas in the tenant namespace: + ```shell + kubectl --kubeconfig=tenant-a -n a1 auth can-i create quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i update quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i patch quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i delete quota + kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota + ``` + Each command must return 'no'" \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md index 0cda61a58..08ac463a5 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privilege_escalation/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Control Plane Isolation
+Control Plane Isolation **Description:** -The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
+The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges. **Rationale:** -The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
+The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.

 **Audit:**

-Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail.
- -Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail. +Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md index 829ca39b8..a076af14a 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_privileged_containers/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Control Plane Isolation
+Control Plane Isolation **Description:** -Linux
+Tenants should not be allowed to run privileged containers.

 **Rationale:**

-By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.
+By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers. **Audit:** -Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail.
- -Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail. +Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md index c451ff786..8c75b6bc5 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_bind_mounts/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Host Protection
+Host Protection **Description:** -Tenants should not be able to mount host volumes and folders (bind mounts).
+Tenants should not be able to mount host volumes and folders (bind mounts). **Rationale:** -The use of host volumes and directories can be used to access shared data or escalate priviliges and also creates a tight coupling between a tenant workload and a host.
+Host volumes and directories can be used to access shared data or escalate privileges, and they also create a tight coupling between a tenant workload and a host.

 **Audit:**

-Create a pod defining a volume of type hostpath. The pod creation must fail.
- -Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod defining a volume of type hostpath. The pod creation must fail. +Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md index 4626d4793..fe201c585 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_ipc/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Host Isolation
+Host Isolation **Description:** -Tenants should not be allowed to share the host's inter-process communication (IPC) namespace.
+Tenants should not be allowed to share the host's inter-process communication (IPC) namespace. **Rationale:** -The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants.
+The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants. **Audit:** -Create a pod or container that sets new `hostIPC` to `true`. The pod creation must fail.
- -Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that sets new `hostIPC` to `true`. The pod creation must fail. +Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md index 52181d61a..c637cef1d 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/README.md @@ -2,30 +2,29 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Host Isolation
+Host Isolation **Description:** -Tenants should not be allowed to use host networking and host ports for their workloads.
+Tenants should not be allowed to use host networking and host ports for their workloads. **Rationale:** -Using `hostPort` and `hostNetwork` allows tenants workloads to share the host networking stack allowing potential snooping of network traffic across application pods
+Using `hostPort` and `hostNetwork` lets tenant workloads share the host networking stack, allowing potential snooping of network traffic across application pods.

 **Audit:**

 Create a pod defining a container using a host port. The pod creation must fail.

-Create a pod defining a container using a host network. The pod creation must fail.
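For illustration only (not part of this patch): a minimal pod manifest using host networking and a host port, of the kind these checks could submit; the name, namespace, image, and port number are placeholder assumptions.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostnetwork-check       # placeholder name
  namespace: a1
spec:
  hostNetwork: true             # host networking must be rejected
  containers:
  - name: test
    image: busybox
    command: ["sleep", "3600"]
    ports:
    - containerPort: 8080
      hostPort: 8080            # a host port must also be rejected
```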
-
+Create a pod defining a container using a host network. The pod creation must fail." diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml index adcd4dbbf..3af631e5e 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_networking_and_ports/config.yaml @@ -6,6 +6,7 @@ description: Tenants should not be allowed to use host networking and host ports remediation: profileLevel: 1 rationale: Using `hostPort` and `hostNetwork` allows tenants workloads to share the host networking stack allowing potential snooping of network traffic across application pods -audit: "Create a pod defining a container using a host port. The pod creation must fail. +audit: | + Create a pod defining a container using a host port. The pod creation must fail. -Create a pod defining a container using a host network. The pod creation must fail." \ No newline at end of file + Create a pod defining a container using a host network. The pod creation must fail." \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md index 9cf9326cc..53eef16ee 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_host_pid/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Host Isolation
+Host Isolation **Description:** -Tenants should not be allowed to share the host process ID (PID) namespace.
+Tenants should not be allowed to share the host process ID (PID) namespace. **Rationale:** -The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace.
+The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace. **Audit:** -Create a pod or container that sets new `hostPID` to `true`. The pod creation must fail.
- -Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that sets new `hostPID` to `true`. The pod creation must fail. +Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md index 80f242a3b..5bcd39227 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/block_use_of_nodeport_services/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Host Isolation
+Host Isolation **Description:** -Tenants should not be able to create services of type NodePort.
+Tenants should not be able to create services of type NodePort. **Rationale:** -NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers.
+NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers. **Audit:** -Create a deployment and an associated service exposing a NodePort. The service creation must fail.
- -Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a deployment and an associated service exposing a NodePort. The service creation must fail. +Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md index 1ecdb46a9..e484c6de5 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/README.md @@ -2,40 +2,41 @@ **Profile Applicability:** -1
+1 **Type:** -Configuration
+Configuration **Category:** -Fairness
+Fairness **Description:** -Namespace resource quotas should be used to allocate, track and limit the number of objects, of a particular type, that can be created within a namespace.
+Namespace resource quotas should be used to allocate, track and limit the number of objects, of a particular type, that can be created within a namespace. **Rationale:** -
+ **Audit:** -
-Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
+ +Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` **Audit:** Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc). -kubectl --kubeconfig=tenant-a -n a1 describe resourcequota
+```shell +kubectl --kubeconfig=tenant-a -n a1 describe resourcequota +``` -**Rationale:** - -Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants.
+**Rationale:** +Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants. diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml index b7679b716..1ccd72050 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_object_quota/config.yaml @@ -6,6 +6,8 @@ description: Namespace resource quotas should be used to allocate, track and lim remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` profileLevel: 1 Rationale: Resource quotas must be configured for each tenant namespace, to guarantee fair number of objects across tenants. -Audit: "Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc). - - kubectl --kubeconfig=tenant-a -n a1 describe resourcequota" +Audit: | + Run the following command to show configured quotas. Make sure that a quota is configured for API objects(PersistentVolumeClaim, LoadBalancer, NodePort ,Pods etc). + ```shell + kubectl --kubeconfig=tenant-a -n a1 describe resourcequota + ``` diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md index 60a593b9f..7cf1681f6 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/README.md @@ -2,30 +2,31 @@ **Profile Applicability:** -1
+1 **Type:** -Configuration
+Configuration **Category:** -Fairness
+Fairness **Description:** -Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources.
+Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources. **Rationale:** -Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants.
+Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants. **Audit:** Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources. -kubectl --kubeconfig=tenant-a -n a1 describe quota
- -Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml`
+```shell +kubectl --kubeconfig=tenant-a -n a1 describe quota +``` +Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` diff --git a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml index 22c0c79ff..78797a2ee 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/configure_ns_quotas/config.yaml @@ -5,7 +5,9 @@ category: Fairness description: Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources. remediation: Create ResourceQuota object, you can use the configuration file present in `quotas` directory, example `kubectl apply -f test/quotas/ns_quota.yaml` profileLevel: 1 -audit: "Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources. - - kubectl --kubeconfig=tenant-a -n a1 describe quota" +audit: | + Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources. + ```shell + kubectl --kubeconfig=tenant-a -n a1 describe quota + ``` rationale: Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md index 364a77119..1d89d483f 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/README.md @@ -2,31 +2,32 @@ **Profile Applicability:** -2
+2 **Type:** -Behavioral
+Behavioral **Category:** -Self-Service Operations
+Self-Service Operations **Description:** -Tenants should be able to perform self-service operations by creating own network policies in their namespaces.
+Tenants should be able to perform self-service operations by creating their own network policies in their namespaces.

 **Rationale:**

-Enables self-service management of network-policies.
+Enables self-service management of network-policies. **Audit:** Run the following commands to check for permissions to manage `network-policy` for each verb(get, create, update, patch, delete, and deletecollection) in the tenant namespace: -kubectl --kubeconfig=tenant-a -n a1 auth can-i <verb> networkpolicy -Each command must return 'yes'
+```bash +kubectl --kubeconfig=tenant-a -n a1 auth can-i verb networkpolicy +``` +Each command must return 'yes' -
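For illustration only (not part of this patch): a sketch of the kind of namespaced Role a cluster administrator might bind to tenant users so that the checks above return 'yes'; the role name and namespace are placeholder assumptions.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tenant-netpol-manager   # placeholder name
  namespace: a1
rules:
- apiGroups: ["networking.k8s.io"]
  resources: ["networkpolicies"]
  verbs: ["get", "create", "update", "patch", "delete", "deletecollection"]
```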
diff --git a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml index d3f9fee82..afb81068c 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml +++ b/benchmarks/kubectl-mtb/test/benchmarks/create_network_policies/config.yaml @@ -5,9 +5,10 @@ category: Self-Service Operations description: Tenants should be able to perform self-service operations by creating own network policies in their namespaces. remediation: profileLevel: 2 -audit: "Run the following commands to check for permissions to manage `network-policy` for each verb(get, create, update, patch, delete, and deletecollection) in the tenant namespace: - - kubectl --kubeconfig=tenant-a -n a1 auth can-i networkpolicy - -Each command must return 'yes'" +audit: | + Run the following commands to check for permissions to manage `network-policy` for each verb(get, create, update, patch, delete, and deletecollection) in the tenant namespace: + ```bash + kubectl --kubeconfig=tenant-a -n a1 auth can-i verb networkpolicy + ``` + Each command must return 'yes' rationale: Enables self-service management of network-policies. \ No newline at end of file diff --git a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md index 218b93a49..069a249b2 100644 --- a/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md +++ b/benchmarks/kubectl-mtb/test/benchmarks/require_run_as_non_root_user/README.md @@ -2,29 +2,27 @@ **Profile Applicability:** -1
+1 **Type:** -Behavioral Check
+Behavioral Check **Category:** -Control Plane Isolation
+Control Plane Isolation **Description:** -Linux
+Containers must be required to run as non-root users.

 **Rationale:**

-Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non root users.
+Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non root users. **Audit:** -Create a pod or container that does not set `runAsNonRoot` to `true` in its `securityContext`. The pod creation must fail for both cases.
- -Define a PodSecurityPolicy a runAsUser rule set to MustRunAsNonRoot and map the policy to each tenant's namespace, or use a policy engine such as OPA/Gatekeeper or Kyverno to enforce that runAsNonRoot is set to true for tenant pods. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
- +Create a pod or container that does not set `runAsNonRoot` to `true` in its `securityContext`. The pod creation must fail for both cases. +Define a PodSecurityPolicy a runAsUser rule set to MustRunAsNonRoot and map the policy to each tenant's namespace, or use a policy engine such as OPA/Gatekeeper or Kyverno to enforce that runAsNonRoot is set to true for tenant pods. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies). From d4c5b73a0f8294db6479c6c3736893677d77e20b Mon Sep 17 00:00:00 2001 From: Anuj Shamra Date: Sun, 2 Aug 2020 13:04:00 +0530 Subject: [PATCH 3/4] removed commented code --- benchmarks/kubectl-mtb/docs/main.go | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/benchmarks/kubectl-mtb/docs/main.go b/benchmarks/kubectl-mtb/docs/main.go index f54a1ec68..ee26b91a1 100644 --- a/benchmarks/kubectl-mtb/docs/main.go +++ b/benchmarks/kubectl-mtb/docs/main.go @@ -131,9 +131,6 @@ func main() { // during second unmarshalling for _, i := range values { deleteFields(i, d.AdditionalField) - } - // t := template.New("README template") - // t, err = t.Parse(templ) // Get directory of the config file dirPath := getDirectory(path, "/") @@ -144,22 +141,6 @@ func main() { return err } - // f, err := os.Create(dirPath + "/README.md") - // if err != nil { - // return err - // } - - // // Write the output to the README file - // err = t.Execute(f, d) - // if err != nil { - // return err - // } - - // err = f.Close() - // if err != nil { - // return err - // } - mainFile, err := os.Create(fmt.Sprintf("%s/README.md", dirPath)) if err != nil { return err From ecf91e51088c7aaddcbc0c03a6ed4addd45499c0 Mon Sep 17 00:00:00 2001 From: Anuj Shamra Date: Sun, 2 Aug 2020 13:07:56 +0530 Subject: [PATCH 4/4] fixed typo --- benchmarks/kubectl-mtb/docs/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmarks/kubectl-mtb/docs/main.go b/benchmarks/kubectl-mtb/docs/main.go index ee26b91a1..5ca8281ca 100644 --- a/benchmarks/kubectl-mtb/docs/main.go +++ b/benchmarks/kubectl-mtb/docs/main.go @@ -131,6 +131,7 @@ func main() { // during second unmarshalling for _, i := range values { deleteFields(i, d.AdditionalField) + } // Get directory of the config file dirPath := getDirectory(path, "/")