diff --git a/tests/e2e-openshift/Dockerfile b/tests/e2e-openshift/Dockerfile index 5a9d46944d..5efd816d1f 100644 --- a/tests/e2e-openshift/Dockerfile +++ b/tests/e2e-openshift/Dockerfile @@ -26,7 +26,7 @@ RUN curl -LO https://github.com/kudobuilder/kuttl/releases/download/v0.15.0/kube && mv kubectl-kuttl_0.15.0_linux_x86_64 /usr/local/bin/kuttl # Install Chainsaw e2e -RUN go install github.com/kyverno/chainsaw@v0.1.7 +RUN go install github.com/kyverno/chainsaw@v0.2.0 # Install kubectl and oc RUN curl -LO https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/latest/openshift-client-linux.tar.gz \ diff --git a/tests/e2e-openshift/scrape-in-cluster-monitoring/chainsaw-test.yaml b/tests/e2e-openshift/scrape-in-cluster-monitoring/chainsaw-test.yaml new file mode 100644 index 0000000000..e7616fa735 --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/chainsaw-test.yaml @@ -0,0 +1,26 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: scrape-in-cluster-monitoring +spec: + namespace: chainsaw-scrape-in-cluster-monitoring + steps: + - name: Create OTEL collector with Prometheus receiver to scrape in-cluster metrics + try: + - apply: + file: create-clusterrolebinding.yaml + - assert: + file: create-clusterrolebinding-assert.yaml + - apply: + file: create-otel-instance.yaml + - assert: + file: create-otel-instance-assert.yaml + - name: Wait for the metrics to be collected + try: + - sleep: + duration: 10s + - name: Check the presence of metrics in the OTEL collector + try: + - script: + timeout: 5m + content: ./check_logs.sh diff --git a/tests/e2e-openshift/scrape-in-cluster-monitoring/check_logs.sh b/tests/e2e-openshift/scrape-in-cluster-monitoring/check_logs.sh new file mode 100755 index 0000000000..1531be4d2c --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/check_logs.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# This script checks the OpenTelemetry collector pod for the presence of Metrics. 
+ +# Define the label selector +LABEL_SELECTOR="app.kubernetes.io/component=opentelemetry-collector" +NAMESPACE=chainsaw-scrape-in-cluster-monitoring + +# Define the search strings +SEARCH_STRING1='-> container' +SEARCH_STRING2='-> label_pod_security_kubernetes_io_audit: Str(restricted)' +SEARCH_STRING3='-> label_pod_security_kubernetes_io_enforce: Str(privileged)' +SEARCH_STRING4='-> label_kubernetes_io_metadata_name:' +SEARCH_STRING5='-> namespace:' + +# Initialize flags to track if strings are found +FOUND1=false +FOUND2=false +FOUND3=false +FOUND4=false +FOUND5=false + +# Loop until all strings are found +while ! $FOUND1 || ! $FOUND2 || ! $FOUND3 || ! $FOUND4 || ! $FOUND5; do + # Get the list of pods with the specified label + PODS=($(kubectl -n $NAMESPACE get pods -l $LABEL_SELECTOR -o jsonpath='{.items[*].metadata.name}')) + + # Loop through each pod and search for the strings in the logs + for POD in "${PODS[@]}"; do + # Search for the first string + if ! $FOUND1 && kubectl -n $NAMESPACE --tail=500 logs $POD | grep -q -- "$SEARCH_STRING1"; then + echo "\"$SEARCH_STRING1\" found in $POD" + FOUND1=true + fi + # Search for the second string + if ! $FOUND2 && kubectl -n $NAMESPACE --tail=500 logs $POD | grep -q -- "$SEARCH_STRING2"; then + echo "\"$SEARCH_STRING2\" found in $POD" + FOUND2=true + fi + # Search for the third string + if ! $FOUND3 && kubectl -n $NAMESPACE --tail=500 logs $POD | grep -q -- "$SEARCH_STRING3"; then + echo "\"$SEARCH_STRING3\" found in $POD" + FOUND3=true + fi + # Search for the fourth string + if ! $FOUND4 && kubectl -n $NAMESPACE --tail=500 logs $POD | grep -q -- "$SEARCH_STRING4"; then + echo "\"$SEARCH_STRING4\" found in $POD" + FOUND4=true + fi + # Search for the fifth string + if ! 
$FOUND5 && kubectl -n $NAMESPACE --tail=500 logs $POD | grep -q -- "$SEARCH_STRING5"; then + echo "\"$SEARCH_STRING5\" found in $POD" + FOUND5=true + fi + done +done + +echo "Found the matched metrics in collector" diff --git a/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding-assert.yaml b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding-assert.yaml new file mode 100644 index 0000000000..5b9aac803e --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding-assert.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: chainsaw-scrape-in-cluster-monitoring-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view +subjects: +- kind: ServiceAccount + name: otel-collector + namespace: chainsaw-scrape-in-cluster-monitoring + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cabundle + namespace: chainsaw-scrape-in-cluster-monitoring \ No newline at end of file diff --git a/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding.yaml b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding.yaml new file mode 100644 index 0000000000..80e00a48b5 --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-clusterrolebinding.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: chainsaw-scrape-in-cluster-monitoring-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view +subjects: + - kind: ServiceAccount + name: otel-collector + namespace: chainsaw-scrape-in-cluster-monitoring + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: cabundle + namespace: chainsaw-scrape-in-cluster-monitoring + annotations: + service.beta.openshift.io/inject-cabundle: "true" diff --git 
a/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance-assert.yaml b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance-assert.yaml new file mode 100644 index 0000000000..ae6c0cc554 --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance-assert.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: otel-collector + namespace: chainsaw-scrape-in-cluster-monitoring +status: + availableReplicas: 1 + readyReplicas: 1 + replicas: 1 + +--- +apiVersion: v1 +kind: Service +metadata: + name: otel-collector-monitoring + namespace: chainsaw-scrape-in-cluster-monitoring +spec: + ports: + - name: monitoring + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app.kubernetes.io/component: opentelemetry-collector + app.kubernetes.io/instance: chainsaw-scrape-in-cluster-monitoring.otel + app.kubernetes.io/managed-by: opentelemetry-operator + app.kubernetes.io/part-of: opentelemetry \ No newline at end of file diff --git a/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance.yaml b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance.yaml new file mode 100644 index 0000000000..f8d53af4ab --- /dev/null +++ b/tests/e2e-openshift/scrape-in-cluster-monitoring/create-otel-instance.yaml @@ -0,0 +1,46 @@ +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: otel + namespace: chainsaw-scrape-in-cluster-monitoring +spec: + volumeMounts: + - name: cabundle-volume + mountPath: /etc/pki/ca-trust/source/service-ca + readOnly: true + volumes: + - name: cabundle-volume + configMap: + name: cabundle + mode: deployment + config: | + receivers: + prometheus: + config: + scrape_configs: + - job_name: 'federate' + scrape_interval: 15s + scheme: https + tls_config: + ca_file: /etc/pki/ca-trust/source/service-ca/service-ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # honor_labels needs to be set to 
false due to bug https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/32555 + honor_labels: false + params: + 'match[]': + - '{__name__="kube_namespace_labels"}' + metrics_path: '/federate' + static_configs: + - targets: + - "prometheus-k8s.openshift-monitoring.svc.cluster.local:9091" + + exporters: + debug: + verbosity: detailed + + service: + pipelines: + metrics: + receivers: [prometheus] + processors: [] + exporters: [debug]