
Commit

Merge pull request #1251 from phoenixking25/second-ns
[MTB] refactored code and added other ns flag
k8s-ci-robot committed Dec 11, 2020
2 parents 66244f1 + c3a27b5 commit 083c78c
Showing 35 changed files with 173 additions and 39 deletions.
109 changes: 72 additions & 37 deletions benchmarks/kubectl-mtb/internal/kubectl-mtb/run.go
@@ -102,7 +102,7 @@ func reportSuiteDidEnd(suiteSummary *reporter.SuiteSummary, reportersArray []rep
}

func removeBenchmarksWithIDs(ids []string) {
temp := []*benchmark.Benchmark{}
var temp []*benchmark.Benchmark
for _, benchmark := range benchmarks {
found := false
for _, id := range ids {
@@ -143,22 +143,78 @@ func validateFlags(cmd *cobra.Command) error {
return nil
}

func runTests(cmd *cobra.Command, args []string) error {

benchmarkRunOptions.Label, _ = cmd.Flags().GetString("labels")
// Get log level
func setupLogger(cmd *cobra.Command) {
debug, _ := cmd.Flags().GetBool("debug")
if debug {
benchmarkRunOptions.Logger = log.GetLogger(true)
} else {
// default mode production
benchmarkRunOptions.Logger = log.GetLogger(false)
}
}

func setupReporters(cmd *cobra.Command) ([]reporter.Reporter, error) {
// Get reporters from the user
reporterFlag, _ := cmd.Flags().GetString("out")
reporters := strings.Split(reporterFlag, ",")
reportersArray, err := reporter.GetReporters(reporters)
return reporter.GetReporters(reporters)
}

func executePreRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
err := b.PreRun(benchmarkRunOptions)
if err != nil {
benchmarkRunOptions.Logger.Debug(err.Error())
suiteSummary.NumberOfFailedValidations++
ts.Validation = false
ts.ValidationError = err
b.Status = "Error"
}
}

func executeRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
if ts.Validation {
err := b.Run(benchmarkRunOptions)
if err != nil {
benchmarkRunOptions.Logger.Debug(err.Error())
suiteSummary.NumberOfFailedTests++
ts.Test = false
ts.TestError = err
b.Status = "Fail"
} else {
suiteSummary.NumberOfPassedTests++
b.Status = "Pass"
}
}
}

func executePostRun(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) {
if ts.Test {
if b.PostRun != nil {
err := b.PostRun(benchmarkRunOptions)
if err != nil {
fmt.Print(err.Error())
}
}
}
}

// shouldSkipTest reports whether a benchmark requires more tenant namespaces
// than were provided: benchmarks with NamespaceRequired > 1 are skipped unless
// both the other namespace and the other tenant admin have been set.
func shouldSkipTest(b *benchmark.Benchmark, suiteSummary *reporter.SuiteSummary, ts *reporter.TestSummary) bool {
if b.NamespaceRequired > 1 {
if benchmarkRunOptions.OtherNamespace != "" && benchmarkRunOptions.OtherTenant != "" {
return false
}
return true
}
return false
}

func runTests(cmd *cobra.Command, args []string) error {

benchmarkRunOptions.Label, _ = cmd.Flags().GetString("labels")
// Get log level
setupLogger(cmd)

reportersArray, err := setupReporters(cmd)
if err != nil {
return err
}
@@ -192,40 +248,17 @@ func runTests(cmd *cobra.Command, args []string) error {

startTest := time.Now()

//Run Prerun
err = b.PreRun(benchmarkRunOptions)
if err != nil {
benchmarkRunOptions.Logger.Debug(err.Error())
suiteSummary.NumberOfFailedValidations++
ts.Validation = false
ts.ValidationError = err
b.Status = "Error"
if shouldSkipTest(b, suiteSummary, ts) {
continue
}

// Check PreRun status
if ts.Validation {
err = b.Run(benchmarkRunOptions)
if err != nil {
benchmarkRunOptions.Logger.Debug(err.Error())
suiteSummary.NumberOfFailedTests++
ts.Test = false
ts.TestError = err
b.Status = "Fail"
} else {
suiteSummary.NumberOfPassedTests++
b.Status = "Pass"
}
}
// Lifecycles
executePreRun(b, suiteSummary, ts)

executeRun(b, suiteSummary, ts)

executePostRun(b, suiteSummary, ts)

// Check Run status
if ts.Test {
if b.PostRun != nil {
err = b.PostRun(benchmarkRunOptions)
if err != nil {
fmt.Print(err.Error())
}
}
}
elapsed := time.Since(startTest)
ts.RunTime = elapsed
reportTestWillRun(ts, reportersArray)
@@ -245,6 +278,8 @@ func newRunCmd() *cobra.Command {
runCmd.Flags().String("as", "", "(required) user name to impersonate")
runCmd.Flags().StringP("out", "o", "default", "(optional) output reporters (default, policyreport)")
runCmd.Flags().StringP("skip", "s", "", "(optional) benchmark IDs to skip")
runCmd.Flags().String("other-namespace", "", "(optional) other tenant namespace")
runCmd.Flags().String("other-tenant-admin","", "(optional) other tenant admin")
runCmd.Flags().StringP("labels", "l", "", "(optional) labels")

return runCmd
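The two new flags above are what the PR title refers to; their values have to reach `benchmarkRunOptions.OtherNamespace` and `benchmarkRunOptions.OtherTenant` before `shouldSkipTest` can act on them, and that wiring sits in a hunk collapsed in this view. A minimal sketch of what it could look like, assuming it lives in the same package as `run.go` (the helper name `readMultiNamespaceFlags` is hypothetical):

```go
// readMultiNamespaceFlags copies the new flag values into the shared run
// options. Benchmarks declaring NamespaceRequired > 1 are skipped by
// shouldSkipTest unless both values end up non-empty.
func readMultiNamespaceFlags(cmd *cobra.Command) {
	benchmarkRunOptions.OtherNamespace, _ = cmd.Flags().GetString("other-namespace")
	benchmarkRunOptions.OtherTenant, _ = cmd.Flags().GetString("other-tenant-admin")
}
```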
1 change: 1 addition & 0 deletions benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go
@@ -19,6 +19,7 @@ type Benchmark struct {
Status string `yaml:"status"`
Rationale string `yaml:"rationale"`
Audit string `yaml:"audit"`
NamespaceRequired int `yaml:"namespaceRequired"`
PreRun func(types.RunOptions) error
Run func(types.RunOptions) error
PostRun func(types.RunOptions) error
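For illustration, a hedged sketch of a benchmark definition that uses the new field — it relies only on the struct fields visible in this hunk, and the variable name, check logic, and surrounding registration are assumptions, not code from this commit:

```go
// twoTenantCheck is a hypothetical benchmark that needs a second tenant
// namespace; the runner skips it unless --other-namespace and
// --other-tenant-admin are provided.
var twoTenantCheck = &benchmark.Benchmark{
	NamespaceRequired: 2,
	Run: func(options types.RunOptions) error {
		// A cross-tenant access check would go here, comparing what the
		// primary tenant can do in the "other" tenant's namespace.
		return nil
	},
}
```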
@@ -33,3 +33,8 @@ kubectl --kubeconfig tenant-a auth can-i verb resource
Each command must return 'no'



**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Control Plane Isolation
description: Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc.
remediation:
profileLevel: 1
namespaceRequired: 1
rationale: Access controls should be configured for tenants so that a tenant cannot list, create, modify or delete cluster resources
audit: |
Run the following commands to retrieve the list of non-namespaced resources
@@ -28,3 +28,8 @@ Create a pod or container that adds new `capabilities` in its `securityContext`.

Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -6,4 +6,5 @@ description: Linux
remediation: Define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce new capabilities cannot be added. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
rationale: Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.
profileLevel: 1
namespaceRequired: 1
audit: Create a pod or container that adds new `capabilities` in its `securityContext`. The pod creation must fail.
@@ -30,3 +30,8 @@ For each returned by the first command verify that the resource cannot be modifi

Each command must return 403 FORBIDDEN


**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Tenant Isolation
description: Each tenant namespace may contain resources set up by the cluster administrator for multi-tenancy, such as role bindings and network policies. Tenants should not be allowed to modify the namespaced resources created by the cluster administrator for multi-tenancy. However, for some resources such as network policies, tenants can configure additional instances of the resource for their workloads.
remediation:
profileLevel: 1
namespaceRequired: 1
rationale: Tenants can escalate privileges and impact other tenants if they are able to delete or modify required multi-tenancy resources such as namespace resource quotas or default network policy.
audit: |
The resources managed by the cluster administrator and that cannot be modified by tenant administrator can be identified by a label configured in the benchmarks configuration YAML file. If no label is provided, then this test looks for any existing network policy and role binding (resource quotas are handled by a separate test) and tries to modify and delete them. Run the following commands to retrieve the list of resources managed by the cluster administrator
@@ -32,3 +32,8 @@ kubectl --kubeconfig=tenant-a -n a1 auth can-i deletecollection quota
```
Each command must return 'no'


**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Tenant Isolation
description: Tenants should not be able to modify the resource quotas defined in their namespaces
remediation:
profileLevel: 1
namespaceRequired: 1
rationale: Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.
audit: |
Run the following commands to check for permissions to manage quotas in the tenant namespace:
@@ -28,3 +28,8 @@ Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its

Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Control Plane Isolation
description: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
remediation: Define a `PodSecurityPolicy` with `allowPrivilegeEscalation` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent privilege escalation. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
namespaceRequired: 1
audit: Create a pod or container that sets `allowPrivilegeEscalation` to `true` in its `securityContext`. The pod creation must fail.
rationale: The `securityContext.allowPrivilegeEscalation` setting allows a process to gain more privileges from its parent process. Processes in tenant containers should not be allowed to gain additional privileges.
@@ -28,3 +28,8 @@ Create a pod or container that sets `privileged` to `true` in its `securityConte

Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Control Plane Isolation
rationale: By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.
remediation: Define a `PodSecurityPolicy` with `privileged` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to prevent tenants from running privileged containers. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
description: Linux
namespaceRequired: 1
audit: Create a pod or container that sets `privileged` to `true` in its `securityContext`. The pod creation must fail.
@@ -28,3 +28,8 @@ Create a pod or container that sets new `hostIPC` to `true`. The pod creation mu

Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostIPC` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Host Isolation
description: Tenants should not be allowed to share the host's inter-process communication (IPC) namespace.
remediation: Define a `PodSecurityPolicy` with `hostIPC` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostIPC` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
namespaceRequired: 1
audit: Create a pod or container that sets `hostIPC` to `true`. The pod creation must fail.
rationale: The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace allowing potential access to host processes or processes belonging to other tenants.
@@ -26,3 +26,8 @@ Create a pod defining a container using a host port. The pod creation must fail.

Create a pod defining a container using a host network. The pod creation must fail.


**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Host Isolation
description: Tenants should not be allowed to use host networking and host ports for their workloads.
remediation:
profileLevel: 1
namespaceRequired: 1
rationale: Using `hostPort` and `hostNetwork` allows tenant workloads to share the host networking stack, allowing potential snooping of network traffic across application pods
audit: |
Create a pod defining a container using a host port. The pod creation must fail.
@@ -28,3 +28,8 @@ Create a pod defining a volume of type hostpath. The pod creation must fail.

Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Host Protection
description: Tenants should not be able to mount host volumes and directories
remediation: Define a `PodSecurityPolicy` that restricts hostPath volumes and map the policy to each tenant namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that a `hostPath` volume cannot be used. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
namespaceRequired: 1
rationale: Host volumes and directories can be used to access shared data or escalate privileges, and they also create a tight coupling between a tenant workload and a host.
audit: Create a pod defining a volume of type hostpath. The pod creation must fail.
@@ -28,3 +28,8 @@ Create a pod or container that sets new `hostPID` to `true`. The pod creation mu

Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Host Isolation
description: Tenants should not be allowed to share the host process ID (PID) namespace.
remediation: Define a `PodSecurityPolicy` with `hostPID` set to `false` and map the policy to each tenant's namespace, or use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to enforce that `hostPID` cannot be set to `true`. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
namespaceRequired: 1
audit: Create a pod or container that sets `hostPID` to `true`. The pod creation must fail.
rationale: The `hostPID` setting allows pods to share the host process ID namespace allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace.
@@ -28,3 +28,8 @@ Create a deployment and an associated service exposing a NodePort. The service c

Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).


**namespaceRequired:**

1

@@ -5,5 +5,6 @@ category: Host Isolation
description: Tenants should not be able to create services of type NodePort.
remediation: Use a policy engine such as [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper) or [Kyverno](https://kyverno.io) to block NodePort Services. You can use the policies present [here](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks/kubectl-mtb/test/policies).
profileLevel: 1
namespaceRequired: 1
audit: Create a deployment and an associated service exposing a NodePort. The service creation must fail.
rationale: NodePorts configure host ports that cannot be secured using Kubernetes network policies and require upstream firewalls. Also, multiple tenants cannot use the same host port numbers.
@@ -34,3 +34,8 @@ kubectl --kubeconfig=tenant-a -n a1 describe resourcequota

Resource quotas must be configured for each tenant namespace to guarantee a fair number of objects across tenants.


**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Fairness
description: Namespace resource quotas should be used to allocate, track and limit the number of objects, of a particular type, that can be created within a namespace.
remediation: Create a ResourceQuota object; you can use the configuration file present in the `quotas` directory, for example `kubectl apply -f test/quotas/ns_quota.yaml`
profileLevel: 1
namespaceRequired: 1
rationale: Resource quotas must be configured for each tenant namespace to guarantee a fair number of objects across tenants.
audit: |
Run the following command to show configured quotas. Make sure that a quota is configured for API objects (PersistentVolumeClaim, LoadBalancer, NodePort, Pods, etc.).
@@ -32,3 +32,8 @@ kubectl --kubeconfig=tenant-a -n a1 describe quota

Create a ResourceQuota object; you can use the configuration file present in the `quotas` directory, for example `kubectl apply -f test/quotas/ns_quota.yaml`


**namespaceRequired:**

1

@@ -5,6 +5,7 @@ category: Fairness
description: Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources.
remediation: Create a ResourceQuota object; you can use the configuration file present in the `quotas` directory, for example `kubectl apply -f test/quotas/ns_quota.yaml`
profileLevel: 1
namespaceRequired: 1
audit: |
Run the following command to show configured quotas. Make sure that a quota is configured for CPU, memory, and storage resources.
```shell
@@ -29,3 +29,8 @@ kubectl --kubeconfig=tenant-a -n a1 auth can-i verb networkpolicy
Each command must return 'yes'



**namespaceRequired:**

1
