
feat: Allow Helm to manage namespace where objects are created. #1132

Merged 2 commits on Mar 3, 2021
9 changes: 5 additions & 4 deletions .github/workflows/workflow.yaml
@@ -132,6 +132,7 @@ jobs:
strategy:
matrix:
HELM_VERSION: ["2.17.0", "3.4.2"]
GATEKEEPER_NAMESPACE: ["gatekeeper-system", "custom-namespace"]
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
@@ -145,13 +146,13 @@ jobs:
- name: Run e2e
run: |
make e2e-build-load-image IMG=gatekeeper-e2e-helm:latest
make e2e-helm-deploy HELM_REPO=gatekeeper-e2e-helm HELM_RELEASE=latest HELM_VERSION=${{ matrix.HELM_VERSION }}
make test-e2e
make e2e-helm-deploy HELM_REPO=gatekeeper-e2e-helm HELM_RELEASE=latest HELM_VERSION=${{ matrix.HELM_VERSION }} GATEKEEPER_NAMESPACE=${{ matrix.GATEKEEPER_NAMESPACE }}
make test-e2e GATEKEEPER_NAMESPACE=${{ matrix.GATEKEEPER_NAMESPACE }}

- name: Save logs
run: |
kubectl logs -n gatekeeper-system -l control-plane=controller-manager --tail=-1 > logs-helm-${{ matrix.HELM_VERSION }}-controller.json
kubectl logs -n gatekeeper-system -l control-plane=audit-controller --tail=-1 > logs-helm-${{ matrix.HELM_VERSION }}-audit.json
kubectl logs -n ${{ matrix.GATEKEEPER_NAMESPACE }} -l control-plane=controller-manager --tail=-1 > logs-helm-${{ matrix.HELM_VERSION }}-${{ matrix.GATEKEEPER_NAMESPACE }}-controller.json
kubectl logs -n ${{ matrix.GATEKEEPER_NAMESPACE }} -l control-plane=audit-controller --tail=-1 > logs-helm-${{ matrix.HELM_VERSION }}-${{ matrix.GATEKEEPER_NAMESPACE }}-audit.json

- name: Upload artifacts
uses: actions/upload-artifact@v2
17 changes: 13 additions & 4 deletions Makefile
@@ -15,6 +15,8 @@ KUSTOMIZE_VERSION ?= 3.8.8
BATS_VERSION ?= 1.2.1
KUBECTL_KUSTOMIZE_VERSION ?= 1.20.1-${KUSTOMIZE_VERSION}
HELM_VERSION ?= 2.17.0
HELM_ARGS ?=
GATEKEEPER_NAMESPACE ?= gatekeeper-system

BUILD_COMMIT := $(shell ./build/get-build-commit.sh)
BUILD_TIMESTAMP := $(shell ./build/get-build-timestamp.sh)
@@ -40,7 +42,7 @@ MANAGER_IMAGE_PATCH := "apiVersion: apps/v1\
\n - --port=8443\
\n - --logtostderr\
\n - --emit-admission-events\
\n - --exempt-namespace=gatekeeper-system\
\n - --exempt-namespace=${GATEKEEPER_NAMESPACE}\
\n - --operation=webhook\
\n---\
\napiVersion: apps/v1\
@@ -110,7 +112,7 @@ e2e-build-load-image: docker-buildx
kind load docker-image --name kind ${IMG}

e2e-verify-release: patch-image deploy test-e2e
echo -e '\n\n======= manager logs =======\n\n' && kubectl logs -n gatekeeper-system -l control-plane=controller-manager
echo -e '\n\n======= manager logs =======\n\n' && kubectl logs -n ${GATEKEEPER_NAMESPACE} -l control-plane=controller-manager

e2e-helm-install:
rm -rf .staging/helm
@@ -120,13 +122,20 @@ e2e-helm-install:
./.staging/helm/linux-amd64/helm version --client

e2e-helm-deploy: e2e-helm-install
ifneq ($(GATEKEEPER_NAMESPACE),gatekeeper-system)
kubectl create namespace $(GATEKEEPER_NAMESPACE) --dry-run=client -o yaml | kubectl apply -f -
kubectl label ns $(GATEKEEPER_NAMESPACE) admission.gatekeeper.sh/ignore=no-self-managing
kubectl label ns $(GATEKEEPER_NAMESPACE) gatekeeper.sh/system="yes"
$(eval HELM_ARGS := --namespace $(GATEKEEPER_NAMESPACE) --set createNamespace=false)
endif

@if [ $$(echo ${HELM_VERSION} | head -c 1) = "2" ]; then\
kubectl create clusterrolebinding tiller-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default;\
./.staging/helm/linux-amd64/helm init --wait --history-max=5;\
kubectl -n kube-system wait --for=condition=Ready pod -l name=tiller --timeout=300s;\
./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name=gatekeeper --debug --set image.repository=${HELM_REPO} --set image.release=${HELM_RELEASE} --set emitAdmissionEvents=true --set emitAuditEvents=true;\
./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name=gatekeeper --debug ${HELM_ARGS} --set image.repository=${HELM_REPO} --set image.release=${HELM_RELEASE} --set emitAdmissionEvents=true --set emitAuditEvents=true;\
else\
./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper --debug --set image.repository=${HELM_REPO} --set image.release=${HELM_RELEASE} --set emitAdmissionEvents=true --set emitAuditEvents=true;\
./.staging/helm/linux-amd64/helm install manifest_staging/charts/gatekeeper --name-template=gatekeeper ${HELM_ARGS} --debug --set image.repository=${HELM_REPO} --set image.release=${HELM_RELEASE} --set emitAdmissionEvents=true --set emitAuditEvents=true;\
fi;
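A minimal sketch of what the custom-namespace branch of e2e-helm-deploy amounts to when run by hand, assuming Helm 3; the namespace name is illustrative, the chart path and labels follow the Makefile above:

# Create and label the target namespace so the webhook does not act on Gatekeeper's own namespace.
GATEKEEPER_NAMESPACE=custom-namespace
kubectl create namespace "${GATEKEEPER_NAMESPACE}" --dry-run=client -o yaml | kubectl apply -f -
kubectl label ns "${GATEKEEPER_NAMESPACE}" admission.gatekeeper.sh/ignore=no-self-managing
kubectl label ns "${GATEKEEPER_NAMESPACE}" gatekeeper.sh/system=yes

# Install into that namespace; createNamespace=false stops the chart from also
# rendering its own hard-coded gatekeeper-system Namespace object.
helm install gatekeeper manifest_staging/charts/gatekeeper \
  --namespace "${GATEKEEPER_NAMESPACE}" \
  --set createNamespace=false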

e2e-helm-upgrade-init: e2e-helm-install
1 change: 1 addition & 0 deletions cmd/build/helmify/kustomization.yaml
@@ -1,3 +1,4 @@
namespace: '{{ include "gatekeeper.namespace" . }}'
commonLabels:
app: '{{ template "gatekeeper.name" . }}'
chart: '{{ template "gatekeeper.name" . }}'
2 changes: 1 addition & 1 deletion cmd/build/helmify/kustomize-for-helm.yaml
@@ -64,7 +64,7 @@ spec:
- --log-denies={{ .Values.logDenies }}
- --emit-admission-events={{ .Values.emitAdmissionEvents }}
- --log-level={{ .Values.logLevel }}
- --exempt-namespace=gatekeeper-system
- --exempt-namespace={{ include "gatekeeper.namespace" . }}
- --operation=webhook
imagePullPolicy: "{{ .Values.image.pullPolicy }}"
image: "{{ .Values.image.repository }}:{{ .Values.image.release }}"
10 changes: 10 additions & 0 deletions cmd/build/helmify/main.go
@@ -40,6 +40,13 @@ func extractName(s string) (string, error) {
return strings.Trim(matches[1], `"'`), nil
}

// extractNamespaceName returns the default "gatekeeper-system" namespace.
// Because the namespace is rendered as '{{ include "gatekeeper.namespace" . }}',
// extractName cannot parse it. There should only ever be one Namespace object,
// so hard-coding the name here is safe.
func extractNamespaceName(s string) (string, error) {
	return "gatekeeper-system", nil
}

func extractCRDKind(obj string) (string, error) {
crd := &apiextensionsv1beta1.CustomResourceDefinition{}
if err := yaml.Unmarshal([]byte(obj), crd); err != nil {
@@ -81,7 +88,10 @@ func (ks *kindSet) Write() error {
return err
}
}
} else if kind == "Namespace" {
nameExtractor = extractNamespaceName
}

for _, obj := range objs {
name, err := nameExtractor(obj)
if err != nil {
12 changes: 12 additions & 0 deletions cmd/build/helmify/static/templates/_helpers.tpl
@@ -1,3 +1,15 @@
{{/*
If createNamespace is set to true, sets the namespace to "gatekeeper-system"
If createNamespace is set to false, sets the namespace to the {{ .Release.Namespace }}
*/}}
{{- define "gatekeeper.namespace" -}}
{{- if .Values.createNamespace }}
{{- printf "gatekeeper-system" }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end -}}

Review thread on this helper:

@sozercan (Member), Feb 24, 2021:

PR looks great! This is a little confusing, though: is it possible to use createNamespace to create any namespace?

helm install ... -> installs to gatekeeper-system
helm install --set createNamespace=false ... -> installs to gatekeeper-system (namespace should exist beforehand)
helm install --namespace myNamespace ... -> installs to myNamespace (creating the namespace)
helm install --namespace myNamespace --set createNamespace=false ... -> installs to myNamespace (namespace should exist beforehand)

Contributor (PR author):

Hi @sozercan. I agree that createNamespace is confusing; it was implemented before this change. It is considered best practice not to create namespaces in a Helm chart. That said, here is how this works.

By default, values.yaml sets createNamespace: true. With that:

helm install ... -> creates the gatekeeper-system namespace and installs the resources there; Helm manages the chart in the default namespace.
helm install --set createNamespace=false ... -> no namespace is created, and none is specified on the helm command, so the resources are installed in the default namespace.
helm install -n myNamespace ... -> the gatekeeper-system namespace is created and the resources are installed there, but the chart is managed in the myNamespace namespace. This is what happens today as well.
helm install -n myNamespace --set createNamespace=false ... -> the resources are installed in myNamespace and the chart is managed there.

When I say "manages the chart in this namespace", I mean that if you were to run helm list -n <namespace>, you would see the chart "installed" in that namespace. As it stands, you are likely to install the resources in one namespace and have the chart managed in another.

Today we install the chart with: helm install -n gatekeeper-system --set createNamespace=false gatekeeper gatekeeper/gatekeeper --create-namespace. That makes it fairly clear what is going on and keeps everything contained to one namespace, but it locks us into gatekeeper-system as our namespace.

Member:

I didn't know there was a --create-namespace flag in Helm 3. I think we should deprecate createNamespace in values once the Helm 2 chart is removed.

Member:

Do we need to update the readme/docs after we cut a new release to reflect these chart changes, so we have better guidance on the recommended way to install the chart? That is, install all the Gatekeeper resources and the chart release in the same namespace, versus install the resources in the gatekeeper-system namespace while the chart release lives in another?
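A hedged summary of the behavior described in the thread, written out as shell commands; the release name, chart path, and myNamespace are illustrative:

# Default (createNamespace=true): resources land in gatekeeper-system, while Helm
# tracks the release in whatever namespace the CLI targets (here, the default one).
helm install gatekeeper manifest_staging/charts/gatekeeper

# createNamespace=false without --namespace: the chart creates no Namespace object
# and the resources go to the CLI's default namespace.
helm install gatekeeper manifest_staging/charts/gatekeeper --set createNamespace=false

# --namespace alone: the helper still resolves to gatekeeper-system for the resources,
# but the release itself is tracked in myNamespace.
helm install gatekeeper manifest_staging/charts/gatekeeper --namespace myNamespace

# --namespace plus createNamespace=false: resources and release both live in myNamespace
# (create it first, or pass --create-namespace on Helm 3).
helm install gatekeeper manifest_staging/charts/gatekeeper --namespace myNamespace --set createNamespace=false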

{{/*
Expand the name of the chart.
*/}}
12 changes: 12 additions & 0 deletions manifest_staging/charts/gatekeeper/templates/_helpers.tpl
@@ -1,3 +1,15 @@
{{/*
If createNamespace is set to true, sets the namespace to "gatekeeper-system"
If createNamespace is set to false, sets the namespace to the {{ .Release.Namespace }}
*/}}
{{- define "gatekeeper.namespace" -}}
{{- if .Values.createNamespace }}
{{- printf "gatekeeper-system" }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end -}}

{{/*
Expand the name of the chart.
*/}}
(another chart template file)
@@ -8,4 +8,4 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-admin
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
(another chart template file)
@@ -10,7 +10,7 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-audit
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
spec:
replicas: 1
selector:
(another chart template file)
@@ -10,7 +10,7 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-controller-manager
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
spec:
replicas: {{ .Values.replicas }}
selector:
@@ -46,7 +46,7 @@ spec:
- --log-denies={{ .Values.logDenies }}
- --emit-admission-events={{ .Values.emitAdmissionEvents }}
- --log-level={{ .Values.logLevel }}
- --exempt-namespace=gatekeeper-system
- --exempt-namespace={{ include "gatekeeper.namespace" . }}
- --operation=webhook
command:
- /manager
(another chart template file)
@@ -8,7 +8,7 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-controller-manager
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
spec:
minAvailable: {{ .Values.pdb.controllerManager.minAvailable }}
selector:
(another chart template file)
@@ -9,7 +9,7 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-manager-role
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
rules:
- apiGroups:
- ""
(another chart template file)
@@ -15,4 +15,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: gatekeeper-admin
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
(another chart template file)
@@ -8,12 +8,12 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-manager-rolebinding
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: gatekeeper-manager-role
subjects:
- kind: ServiceAccount
name: gatekeeper-admin
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
(another chart template file)
@@ -10,5 +10,5 @@ metadata:
gatekeeper.sh/system: "yes"
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-system
name: '{{ include "gatekeeper.namespace" . }}'
{{- end }}
(another chart template file)
@@ -14,7 +14,7 @@ webhooks:
caBundle: Cg==
service:
name: gatekeeper-webhook-service
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
path: /v1/admit
failurePolicy: Ignore
name: validation.gatekeeper.sh
@@ -41,7 +41,7 @@ webhooks:
caBundle: Cg==
service:
name: gatekeeper-webhook-service
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
path: /v1/admitlabel
failurePolicy: Fail
name: check-ignore-label.gatekeeper.sh
(another chart template file)
@@ -9,4 +9,4 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-webhook-server-cert
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
(another chart template file)
@@ -8,7 +8,7 @@ metadata:
heritage: '{{ .Release.Service }}'
release: '{{ .Release.Name }}'
name: gatekeeper-webhook-service
namespace: gatekeeper-system
namespace: '{{ include "gatekeeper.namespace" . }}'
spec:
ports:
- port: 443
6 changes: 3 additions & 3 deletions test/bats/helpers.bash
@@ -67,16 +67,16 @@ wait_for_process() {

get_ca_cert() {
destination="$1"
if [ $(kubectl get secret -n gatekeeper-system gatekeeper-webhook-server-cert -o jsonpath='{.data.ca\.crt}' | wc -w) -eq 0 ]; then
if [ $(kubectl get secret -n ${GATEKEEPER_NAMESPACE} gatekeeper-webhook-server-cert -o jsonpath='{.data.ca\.crt}' | wc -w) -eq 0 ]; then
return 1
fi
kubectl get secret -n gatekeeper-system gatekeeper-webhook-server-cert -o jsonpath='{.data.ca\.crt}' | base64 -d >$destination
kubectl get secret -n ${GATEKEEPER_NAMESPACE} gatekeeper-webhook-server-cert -o jsonpath='{.data.ca\.crt}' | base64 -d >$destination
}

constraint_enforced() {
local kind="$1"
local name="$2"
local pod_list="$(kubectl -n gatekeeper-system get pod -l gatekeeper.sh/operation=webhook -o json)"
local pod_list="$(kubectl -n ${GATEKEEPER_NAMESPACE} get pod -l gatekeeper.sh/operation=webhook -o json)"
if [[ $? -ne 0 ]]; then
echo "error gathering pods"
return 1
29 changes: 15 additions & 14 deletions test/bats/test.bats
@@ -6,6 +6,7 @@ BATS_TESTS_DIR=test/bats/tests
WAIT_TIME=120
SLEEP_TIME=1
CLEAN_CMD="echo cleaning..."
GATEKEEPER_NAMESPACE=${GATEKEEPER_NAMESPACE:-gatekeeper-system}

teardown() {
bash -c "${CLEAN_CMD}"
@@ -14,15 +15,15 @@ teardown() {
teardown_file() {
kubectl delete ns gatekeeper-test-playground gatekeeper-excluded-namespace || true
kubectl delete constrainttemplates k8scontainerlimits k8srequiredlabels k8suniquelabel || true
kubectl delete configs.config.gatekeeper.sh config -n gatekeeper-system || true
kubectl delete configs.config.gatekeeper.sh config -n ${GATEKEEPER_NAMESPACE} || true
}

@test "gatekeeper-controller-manager is running" {
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl -n gatekeeper-system wait --for=condition=Ready --timeout=60s pod -l control-plane=controller-manager"
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl -n ${GATEKEEPER_NAMESPACE} wait --for=condition=Ready --timeout=60s pod -l control-plane=controller-manager"
}

@test "gatekeeper-audit is running" {
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl -n gatekeeper-system wait --for=condition=Ready --timeout=60s pod -l control-plane=audit-controller"
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl -n ${GATEKEEPER_NAMESPACE} wait --for=condition=Ready --timeout=60s pod -l control-plane=audit-controller"
}

@test "namespace label webhook is serving" {
@@ -34,7 +35,7 @@ teardown_file() {
kubectl wait --for=condition=Ready --timeout=60s pod temp
kubectl cp ${cert} temp:/cacert

wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl exec -it temp -- curl -f --cacert /cacert --connect-timeout 1 --max-time 2 https://gatekeeper-webhook-service.gatekeeper-system.svc:443/v1/admitlabel"
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl exec -it temp -- curl -f --cacert /cacert --connect-timeout 1 --max-time 2 https://gatekeeper-webhook-service.${GATEKEEPER_NAMESPACE}.svc:443/v1/admitlabel"
kubectl delete pod temp
}

@@ -75,7 +76,7 @@ teardown_file() {
}

@test "applying sync config" {
kubectl apply -f ${BATS_TESTS_DIR}/sync.yaml
kubectl apply -n ${GATEKEEPER_NAMESPACE} -f ${BATS_TESTS_DIR}/sync.yaml
}

# creating namespaces and audit constraints early so they will have time to reconcile
@@ -95,8 +96,8 @@ teardown_file() {
assert_failure
}

@test "gatekeeper-system ignore label can be patched" {
kubectl patch ns gatekeeper-system --type=json -p='[{"op": "replace", "path": "/metadata/labels/admission.gatekeeper.sh~1ignore", "value": "ignore-label-test-passed"}]'
@test "gatekeeper ns ignore label can be patched" {
kubectl patch ns ${GATEKEEPER_NAMESPACE} --type=json -p='[{"op": "replace", "path": "/metadata/labels/admission.gatekeeper.sh~1ignore", "value": "ignore-label-test-passed"}]'
}

@test "required labels dryrun test" {
@@ -140,8 +141,8 @@ teardown_file() {
kubectl wait --for=condition=Ready --timeout=60s pod temp

num_namespaces=$(kubectl get ns -o json | jq '.items | length')
local pod_ip="$(kubectl -n gatekeeper-system get pod -l gatekeeper.sh/operation=webhook -ojson | jq --raw-output '[.items[].status.podIP][0]' | sed 's#\.#-#g')"
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl exec -it temp -- curl http://${pod_ip}.gatekeeper-system.pod:8888/metrics | grep 'gatekeeper_sync{kind=\"Namespace\",status=\"active\"} ${num_namespaces}'"
local pod_ip="$(kubectl -n ${GATEKEEPER_NAMESPACE} get pod -l gatekeeper.sh/operation=webhook -ojson | jq --raw-output '[.items[].status.podIP][0]' | sed 's#\.#-#g')"
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl exec -it temp -- curl http://${pod_ip}.${GATEKEEPER_NAMESPACE}.pod:8888/metrics | grep 'gatekeeper_sync{kind=\"Namespace\",status=\"active\"} ${num_namespaces}'"
kubectl delete pod temp
}

@@ -185,14 +186,14 @@ __required_labels_audit_test() {

@test "emit events test" {
# list events for easy debugging
kubectl get events -n gatekeeper-system
events=$(kubectl get events -n gatekeeper-system --field-selector reason=FailedAdmission -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
kubectl get events -n ${GATEKEEPER_NAMESPACE}
events=$(kubectl get events -n ${GATEKEEPER_NAMESPACE} --field-selector reason=FailedAdmission -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
[[ "$events" -ge 1 ]]

events=$(kubectl get events -n gatekeeper-system --field-selector reason=DryrunViolation -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
events=$(kubectl get events -n ${GATEKEEPER_NAMESPACE} --field-selector reason=DryrunViolation -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
[[ "$events" -ge 1 ]]

events=$(kubectl get events -n gatekeeper-system --field-selector reason=AuditViolation -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
events=$(kubectl get events -n ${GATEKEEPER_NAMESPACE} --field-selector reason=AuditViolation -o json | jq -r '.items[] | select(.metadata.annotations.constraint_kind=="K8sRequiredLabels" )' | jq -s '. | length')
[[ "$events" -ge 1 ]]
}

@@ -204,6 +205,6 @@ __required_labels_audit_test() {
assert_match 'denied the request' "${output}"
assert_failure

kubectl apply -f ${BATS_TESTS_DIR}/sync_with_exclusion.yaml
kubectl apply -n ${GATEKEEPER_NAMESPACE} -f ${BATS_TESTS_DIR}/sync_with_exclusion.yaml
wait_for_process ${WAIT_TIME} ${SLEEP_TIME} "kubectl create configmap should-succeed -n gatekeeper-excluded-namespace"
}
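Since the suite now reads GATEKEEPER_NAMESPACE and defaults it to gatekeeper-system, a run against a custom namespace looks roughly like this; the namespace is illustrative, and the direct bats invocation is an assumption about how test-e2e is wired:

# Deploy the chart into a custom namespace, then run the e2e tests against it.
make e2e-helm-deploy HELM_REPO=gatekeeper-e2e-helm HELM_RELEASE=latest \
  HELM_VERSION=3.4.2 GATEKEEPER_NAMESPACE=custom-namespace
make test-e2e GATEKEEPER_NAMESPACE=custom-namespace

# Or, assuming test-e2e ultimately runs bats on this file, set the variable directly:
GATEKEEPER_NAMESPACE=custom-namespace bats test/bats/test.bats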
1 change: 0 additions & 1 deletion test/bats/tests/sync.yaml
@@ -2,7 +2,6 @@ apiVersion: config.gatekeeper.sh/v1alpha1
kind: Config
metadata:
name: config
namespace: "gatekeeper-system"
spec:
sync:
syncOnly:
1 change: 0 additions & 1 deletion test/bats/tests/sync_with_exclusion.yaml
@@ -2,7 +2,6 @@ apiVersion: config.gatekeeper.sh/v1alpha1
kind: Config
metadata:
name: config
namespace: "gatekeeper-system"
spec:
match:
- excludedNamespaces: ["gatekeeper-excluded-namespace"]