feat: add support for restricting constraint to containers in pods selected by a service

Signed-off-by: Craig Trought <k8s@trought.ca>
ctrought committed Jan 4, 2023
1 parent 335e8e9 commit 69d2913
Showing 26 changed files with 913 additions and 23 deletions.
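In short, the template gains an `onlyServices` parameter: when set, a container is only flagged for missing probes if its Pod is selected by a Service whose `targetPort` matches one of the container's ports (by number or name). A condensed sketch of a constraint opting in, mirroring the `must-have-probes-on-service` sample added below:

```yaml
# Condensed from the must-have-probes-on-service sample in this commit.
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredProbes
metadata:
  name: must-have-probes-on-service
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Pod"]
  parameters:
    onlyServices: true  # restrict violations to containers selected by a Service
    probes: ["readinessProbe", "livenessProbe"]
    probeTypes: ["tcpSocket", "httpGet", "exec"]
```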
@@ -0,0 +1,22 @@
version: 1.1.0
name: k8srequiredprobes
displayName: Required Probes
createdAt: "2023-01-04T23:20:17Z"
description: Requires Pods to have readiness and/or liveness probes.
digest: 1f871f3eb3e25749d3d6872736253ae2cd2dc394ed9b402bb1560cdd28613666
license: Apache-2.0
homeURL: https://open-policy-agent.github.io/gatekeeper-library/website/requiredprobes
keywords:
  - gatekeeper
  - open-policy-agent
  - policies
readme: |-
  # Required Probes
  Requires Pods to have readiness and/or liveness probes.
install: |-
  ### Usage
  ```shell
  kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/artifacthub/library/general/requiredprobes/1.1.0/template.yaml
  ```
provider:
  name: Gatekeeper Library
@@ -0,0 +1,2 @@
resources:
- template.yaml
@@ -0,0 +1,15 @@
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredProbes
metadata:
  name: must-have-probes-on-service
spec:
  enforcementAction: warn
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Pod"]
  parameters:
    onlyServices: true
    probes: ["readinessProbe", "livenessProbe"]
    probeTypes: ["tcpSocket", "httpGet", "exec"]
    customViolationMessage: "See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes for more info."
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
  namespace: default
  labels:
    app.kubernetes.io/name: tomcat
spec:
  containers:
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
          name: tomcat-http
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 5
        periodSeconds: 10
      readinessProbe:
        tcpSocket:
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
  volumes:
    - name: cache-volume
      emptyDir: {}
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
  namespace: default
  labels:
    app.kubernetes.io/name: tomcat-no-svc
    second-label: "example"
spec:
  containers:
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
  volumes:
    - name: cache-volume
      emptyDir: {}
@@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
  namespace: default
  labels:
    app.kubernetes.io/name: tomcat
    second-label: "example"
spec:
  containers:
    - name: nginx-1
      image: nginx:1.7.9
      ports:
        - containerPort: 80
      livenessProbe:
        # tcpSocket:
        #   port: 80
        # initialDelaySeconds: 5
        # periodSeconds: 10
      volumeMounts:
        - mountPath: /tmp/cache
          name: cache-volume
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
          name: tomcat-http
      readinessProbe:
        tcpSocket:
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
  volumes:
    - name: cache-volume
      emptyDir: {}
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: tomcat-service
  namespace: default
spec:
  selector:
    app.kubernetes.io/name: tomcat
  ports:
    - name: name-of-service-port
      protocol: TCP
      port: 80
      targetPort: tomcat-http
@@ -0,0 +1,14 @@
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredProbes
metadata:
  name: must-have-probes
spec:
  enforcementAction: warn
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Pod"]
  parameters:
    onlyServices: false
    probes: ["readinessProbe", "livenessProbe"]
    probeTypes: ["tcpSocket", "httpGet", "exec"]
@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
spec:
  containers:
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 5
        periodSeconds: 10
      readinessProbe:
        tcpSocket:
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
  volumes:
    - name: cache-volume
      emptyDir: {}
@@ -0,0 +1,30 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
spec:
  containers:
    - name: nginx-1
      image: nginx:1.7.9
      ports:
        - containerPort: 80
      livenessProbe:
        # tcpSocket:
        #   port: 80
        # initialDelaySeconds: 5
        # periodSeconds: 10
      volumeMounts:
        - mountPath: /tmp/cache
          name: cache-volume
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
      readinessProbe:
        tcpSocket:
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
  volumes:
    - name: cache-volume
      emptyDir: {}
@@ -0,0 +1,41 @@
apiVersion: v1
kind: Pod
metadata:
  name: test-pod2
spec:
  containers:
    - name: nginx-1
      image: nginx:1.7.9
      ports:
        - containerPort: 80
      readinessProbe:
        # httpGet:
        #   path: /
        #   port: 80
        # initialDelaySeconds: 5
        # periodSeconds: 10
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 5
        periodSeconds: 10
      volumeMounts:
        - mountPath: /tmp/cache
          name: cache-volume
    - name: tomcat
      image: tomcat
      ports:
        - containerPort: 8080
      readinessProbe:
        tcpSocket:
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
      # livenessProbe:
      #   tcpSocket:
      #     port: 8080
      #   initialDelaySeconds: 5
      #   periodSeconds: 10
  volumes:
    - name: cache-volume
      emptyDir: {}
43 changes: 43 additions & 0 deletions artifacthub/library/general/requiredprobes/1.1.0/suite.yaml
@@ -0,0 +1,43 @@
kind: Suite
apiVersion: test.gatekeeper.sh/v1alpha1
metadata:
  name: containerprobes
tests:
  - name: container-probes
    template: template.yaml
    constraint: samples/must-have-probes/constraint.yaml
    cases:
      - name: example-allowed
        object: samples/must-have-probes/example_allowed.yaml
        assertions:
          - violations: no
      - name: example-disallowed
        object: samples/must-have-probes/example_disallowed.yaml
        assertions:
          - violations: yes
      - name: example-disallowed2
        object: samples/must-have-probes/example_disallowed2.yaml
        assertions:
          - violations: yes
  - name: container-probes-only-services
    template: template.yaml
    constraint: samples/must-have-probes-on-service/constraint.yaml
    cases:
      - name: example-allowed-without-service
        object: samples/must-have-probes-on-service/example_allowed_without_service.yaml
        inventory:
          - samples/must-have-probes-on-service/inventory.yaml
        assertions:
          - violations: no
      - name: example-allowed-with-service
        object: samples/must-have-probes-on-service/example_allowed_with_service.yaml
        inventory:
          - samples/must-have-probes-on-service/inventory.yaml
        assertions:
          - violations: no
      - name: example-disallowed-with-service
        object: samples/must-have-probes-on-service/example_disallowed_with_service.yaml
        inventory:
          - samples/must-have-probes-on-service/inventory.yaml
        assertions:
          - violations: yes
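
The suite above can be run locally with Gatekeeper's `gator` CLI; a minimal sketch, assuming `gator` is installed and the command is run from the repository root:

```shell
# Runs the Suite in suite.yaml; referenced templates, constraints, objects,
# and inventory files are resolved relative to the suite file's location.
gator verify artifacthub/library/general/requiredprobes/1.1.0/suite.yaml
```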
101 changes: 101 additions & 0 deletions artifacthub/library/general/requiredprobes/1.1.0/template.yaml
@@ -0,0 +1,101 @@
apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: k8srequiredprobes
  annotations:
    metadata.gatekeeper.sh/title: "Required Probes"
    metadata.gatekeeper.sh/version: 1.1.0
    metadata.gatekeeper.sh/requiresSyncData: |
      "[
        [
          {
            "groups":[""],
            "versions": ["v1"],
            "kinds": ["Service"]
          }
        ]
      ]"
    description: Requires Pods to have readiness and/or liveness probes.
spec:
  crd:
    spec:
      names:
        kind: K8sRequiredProbes
      validation:
        openAPIV3Schema:
          type: object
          properties:
            onlyServices:
              description: "Only apply to pods that are selected by a service"
              type: boolean
            probes:
              description: "A list of probes that are required (ex: `readinessProbe`)"
              type: array
              items:
                type: string
            probeTypes:
              description: "The probe must define a field listed in `probeType` in order to satisfy the constraint (ex. `tcpSocket` satisfies `['tcpSocket', 'exec']`)"
              type: array
              items:
                type: string
            customViolationMessage:
              type: string
              description: >-
                Custom error message generated by a violation that is appended to the standard violation message
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8srequiredprobes

        probe_type_set = probe_types {
          probe_types := {type | type := input.parameters.probeTypes[_]}
        }

        violation[{"msg": msg}] {
          not input.parameters.onlyServices
          container := input.review.object.spec.containers[_]
          probe := input.parameters.probes[_]
          probe_is_missing(container, probe)
          custom_msg := object.get(input.parameters, "customViolationMessage", "")
          msg := trim(sprintf("Container <%v> in this <%v> has no <%v>. %v", [container.name, input.review.kind.kind, probe, custom_msg]), " ")
        }

        violation[{"msg": msg}] {
          input.parameters.onlyServices
          container := input.review.object.spec.containers[_]
          probe := input.parameters.probes[_]
          probe_is_missing(container, probe)
          obj := input.review.object
          svc := data.inventory.namespace[obj.metadata.namespace]["v1"]["Service"][_]
          matchLabels := { [label, value] | some label; value := svc.spec.selector[label] }
          labels := { [label, value] | some label; value := obj.metadata.labels[label] }
          count(matchLabels - labels) == 0
          matching_ports := [p | p := svc.spec.ports[_].targetPort; has_port(p, container)]
          count(matching_ports) > 0
          custom_msg := object.get(input.parameters, "customViolationMessage", "")
          msg := trim(sprintf("Container <%v> in this <%v> has no <%v> and is selected by service <%v> with targetPort(s) %v. %v", [container.name, input.review.kind.kind, probe, svc.metadata.name, matching_ports, custom_msg]), " ")
        }

        has_port(targetPort, container) {
          targetPort == container.ports[_].containerPort
        }

        has_port(targetPort, container) {
          targetPort == container.ports[_].name
        }

        probe_is_missing(ctr, probe) = true {
          not ctr[probe]
        }

        probe_is_missing(ctr, probe) = true {
          probe_field_empty(ctr, probe)
        }

        probe_field_empty(ctr, probe) = true {
          probe_fields := {field | ctr[probe][field]}
          diff_fields := probe_type_set - probe_fields
          count(diff_fields) == count(probe_type_set)
        }
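
The `onlyServices` rule above looks Services up in `data.inventory`, which is only populated when the cluster replicates Services into Gatekeeper's cache (this is what the `requiresSyncData` annotation declares). A sketch of the matching Gatekeeper `Config` resource, assuming the default `gatekeeper-system` install namespace:

```yaml
# Sketch: replicate Services into OPA so data.inventory...["Service"] resolves.
apiVersion: config.gatekeeper.sh/v1alpha1
kind: Config
metadata:
  name: config                  # Gatekeeper expects this exact name
  namespace: gatekeeper-system  # adjust if Gatekeeper runs in another namespace
spec:
  sync:
    syncOnly:
      - group: ""
        version: "v1"
        kind: "Service"
```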