MON-3775: add tests for CMO collection profiles
The tests cover the following cases:
* Apply every collection profile, and check that the expected
  `ServiceMonitor`s are present.
* CMO configuration reconciliation after a collection profile is applied
  is covered by the CMO tests themselves.
* Reconciliation of the Prometheus `*monitor` selectors after a
  collection profile is applied is covered by the CMO tests themselves.

Refer: openshift/enhancements#1298
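
For reference, the monitor test switches profiles by writing the
`cluster-monitoring-config` ConfigMap in the `openshift-monitoring`
namespace; the `config.yaml` payload it renders looks like this (shown
here for the `minimal` profile, as produced by
`makeCollectionProfileConfigurationFor` below):

prometheusk8s:
 collectionProfile: minimal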

Signed-off-by: Pranshu Srivastava <rexagod@gmail.com>
rexagod committed May 5, 2024
1 parent ab28660 commit 7625d2f
Showing 3 changed files with 211 additions and 1 deletion.
3 changes: 2 additions & 1 deletion pkg/defaultmonitortests/types.go
@@ -2,7 +2,6 @@ package defaultmonitortests

import (
"fmt"

"github.com/openshift/origin/pkg/monitortestframework"
"github.com/openshift/origin/pkg/monitortests/authentication/legacyauthenticationmonitortests"
"github.com/openshift/origin/pkg/monitortests/authentication/requiredsccmonitortests"
@@ -18,6 +17,7 @@ import (
"github.com/openshift/origin/pkg/monitortests/kubeapiserver/disruptionlegacyapiservers"
"github.com/openshift/origin/pkg/monitortests/kubeapiserver/disruptionnewapiserver"
"github.com/openshift/origin/pkg/monitortests/kubeapiserver/legacykubeapiservermonitortests"
"github.com/openshift/origin/pkg/monitortests/monitoring/collectionprofiles"
"github.com/openshift/origin/pkg/monitortests/monitoring/statefulsetsrecreation"
"github.com/openshift/origin/pkg/monitortests/network/disruptioningress"
"github.com/openshift/origin/pkg/monitortests/network/disruptionpodnetwork"
@@ -118,6 +118,7 @@ func newDefaultMonitorTests(info monitortestframework.MonitorTestInitializationI
monitorTestRegistry.AddMonitorTestOrDie("disruption-summary-serializer", "Test Framework", disruptionserializer.NewDisruptionSummarySerializer())

monitorTestRegistry.AddMonitorTestOrDie("monitoring-statefulsets-recreation", "Monitoring", statefulsetsrecreation.NewStatefulsetsChecker())
monitorTestRegistry.AddMonitorTestOrDie("monitoring-collection-profiles", "Monitoring", collectionprofiles.NewAvailabilityChecker())

return monitorTestRegistry
}
193 changes: 193 additions & 0 deletions pkg/monitortests/monitoring/collectionprofiles/monitortest.go
@@ -0,0 +1,193 @@
package collectionprofiles

import (
"context"
"fmt"
"github.com/openshift/origin/pkg/monitor/monitorapi"
"github.com/openshift/origin/pkg/monitortestframework"
"github.com/openshift/origin/pkg/test/ginkgo/junitapi"
exutil "github.com/openshift/origin/test/extended/util"
pov1 "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/kube-openapi/pkg/util/sets"
"time"
)

const (
testName = "[sig-instrumentation] Monitoring collection profiles have all service monitor counterparts present"

operatorConfigurationName = "cluster-monitoring-config"
operatorNamespace = "openshift-monitoring"
associationLabel = "app.kubernetes.io/part-of"

collectionProfileLabel = "monitoring.openshift.io/collection-profile"
fullCollectionProfile = "full"
minimalCollectionProfile = "minimal"
defaultCollectionProfile = fullCollectionProfile
)

var (
poClient *pov1.MonitoringV1Client

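// Non-default collection profiles exercised during evaluation; the default
// (full) profile is applied when collection starts.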
supportedNonDefaultCollectionProfiles = sets.NewString(
minimalCollectionProfile,
)
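// Components whose ServiceMonitors are expected for each non-default
// collection profile.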
operatorCollectionProfileToComponents = map[string]sets.String{
minimalCollectionProfile: sets.NewString(
"kube-state-metrics",
"prometheus-adapter",
"node-exporter",
"control-plane",
),
}
)

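// checker implements monitortestframework.MonitorTest and verifies that every
// supported collection profile exposes the expected ServiceMonitors.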
type checker struct {
client kubernetes.Interface
notSupportedReason error
}

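// NewAvailabilityChecker returns the monitor test that exercises CMO
// collection profiles.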
func NewAvailabilityChecker() monitortestframework.MonitorTest {
return &checker{}
}

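// StartCollection builds the Kubernetes and Prometheus Operator clients, marks
// the test as unsupported on clusters that are not TechPreviewNoUpgrade, and
// applies the default (full) collection profile.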
func (c *checker) StartCollection(ctx context.Context, adminRESTConfig *rest.Config, _ monitorapi.RecorderWriter) error {
var err error

// Instantiate the Kubernetes client.
c.client, err = kubernetes.NewForConfig(adminRESTConfig)
if err != nil {
return err
}
poClient, err = pov1.NewForConfig(adminRESTConfig)
if err != nil {
return err
}

// Check for unsupported cluster configurations.
supported := exutil.IsTechPreviewNoUpgrade(exutil.NewCLIWithoutNamespace("instrumentation-collection-profiles-minimal").AsAdmin())
if !supported {
c.notSupportedReason = &monitortestframework.NotSupportedError{
Reason: "collection-profiles are only available in tech-preview clusters",
}
return c.notSupportedReason
}

// Configure the operator.
return c.makeCollectionProfileConfigurationFor(ctx, "")
}

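// CollectData surfaces the not-supported reason, if any; this test records no
// intervals of its own.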
func (c *checker) CollectData(context.Context, string, time.Time, time.Time) (monitorapi.Intervals, []*junitapi.JUnitTestCase, error) {
if c.notSupportedReason != nil {
return nil, nil, c.notSupportedReason
}
return nil, nil, nil
}

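// ConstructComputedIntervals is a no-op for this monitor test.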
func (c *checker) ConstructComputedIntervals(context.Context, monitorapi.Intervals, monitorapi.ResourcesMap, time.Time, time.Time) (constructedIntervals monitorapi.Intervals, err error) {
return nil, nil
}

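// EvaluateTestsFromConstructedIntervals applies each supported non-default
// collection profile and polls until the ServiceMonitors labelled with that
// profile match the expected component set, reporting a JUnit failure if any
// profile does not converge within the timeout.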
func (c *checker) EvaluateTestsFromConstructedIntervals(ctx context.Context, _ monitorapi.Intervals) ([]*junitapi.JUnitTestCase, error) {
// Check all supported collection profiles.
for collectionProfile := range supportedNonDefaultCollectionProfiles {

// Configure the operator for the current profile.
err := c.makeCollectionProfileConfigurationFor(ctx, collectionProfile)
if err != nil {
return nil, err
}

// Poll until all expected service monitors are verified.
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
serviceMonitors, err := poClient.ServiceMonitors(operatorNamespace).List(ctx, metav1.ListOptions{

// Only check service monitors that:
// * are deployed by the operator, and
// * are associated with the current collection profile.
LabelSelector: fmt.Sprintf("%s=%s,%s=%s", collectionProfileLabel, collectionProfile, associationLabel, operatorNamespace),
})
if err != nil {
return false, err
}
componentsForCollectionProfile := operatorCollectionProfileToComponents[collectionProfile]
if len(serviceMonitors.Items) != len(componentsForCollectionProfile) {
return false, fmt.Errorf("expected %d service monitors, got %d", len(componentsForCollectionProfile), len(serviceMonitors.Items))
}
for _, serviceMonitor := range serviceMonitors.Items {
if !componentsForCollectionProfile.Has(serviceMonitor.Name) {
return false, fmt.Errorf("encountered unexpected service monitor %s", serviceMonitor.Name)
}
}
return true, nil
})
if err != nil {
return []*junitapi.JUnitTestCase{
{
Name: testName,
SystemOut: fmt.Sprintf("found discrepancy in service monitors for the %s collection profile", collectionProfile),
FailureOutput: &junitapi.FailureOutput{
Output: fmt.Sprintf("failed to verify service monitors for collection profile %s: %v", collectionProfile, err),
},
},
}, err
}
}

return []*junitapi.JUnitTestCase{{Name: testName}}, nil
}

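// WriteContentToStorage is a no-op for this monitor test.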
func (c *checker) WriteContentToStorage(context.Context, string, string, monitorapi.Intervals, monitorapi.ResourcesMap) error {
return nil
}

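// Cleanup removes the operator configuration created by the test, if it still
// exists.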
func (c *checker) Cleanup(ctx context.Context) error {
// Cleanup the operator configuration.
var err error
_, err = c.client.CoreV1().ConfigMaps(operatorNamespace).Get(ctx, operatorConfigurationName, metav1.GetOptions{})
if err == nil {
err = c.client.CoreV1().ConfigMaps(operatorNamespace).Delete(ctx, operatorConfigurationName, metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}

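// makeCollectionProfileConfigurationFor creates or updates the
// cluster-monitoring-config ConfigMap so that CMO reconciles Prometheus onto
// the given collection profile; an empty profile falls back to the default
// (full) profile.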
func (c *checker) makeCollectionProfileConfigurationFor(ctx context.Context, collectionProfile string) error {
// Default to the full collection profile.
if collectionProfile == "" {
collectionProfile = defaultCollectionProfile
}

// Create a configuration for the operator based on the current profile.
configuration := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: operatorConfigurationName,
Namespace: operatorNamespace,
},
Data: map[string]string{
"config.yaml": fmt.Sprintf("prometheusk8s:\n collectionProfile: %s\n", collectionProfile),
},
}

// Create the configuration if it does not exist, otherwise update it.
_, err := c.client.CoreV1().ConfigMaps(operatorNamespace).Get(ctx, operatorConfigurationName, metav1.GetOptions{})
if err != nil {
_, err = c.client.CoreV1().ConfigMaps(operatorNamespace).Create(ctx, configuration, metav1.CreateOptions{})
if err != nil {
return err
}
} else {
_, err = c.client.CoreV1().ConfigMaps(operatorNamespace).Update(ctx, configuration, metav1.UpdateOptions{})
if err != nil {
return err
}
}

return nil
}
16 changes: 16 additions & 0 deletions pkg/monitortests/monitoring/collectionprofiles/monitortest_test.go
@@ -0,0 +1,16 @@
package collectionprofiles

import (
"testing"
)

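// TestEnsureCollectionProfilesDefinitionConsistency guards against the
// supported profile set and the profile-to-components map drifting apart.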
func TestEnsureCollectionProfilesDefinitionConsistency(t *testing.T) {
if len(operatorCollectionProfileToComponents) != len(supportedNonDefaultCollectionProfiles) {
t.Fatalf("got %d collection profiles, expected %d", len(operatorCollectionProfileToComponents), len(supportedNonDefaultCollectionProfiles))
}
for profile := range operatorCollectionProfileToComponents {
if !supportedNonDefaultCollectionProfiles.Has(profile) {
t.Fatalf("found unexpected collection profile %s", profile)
}
}
}
