3 changes: 3 additions & 0 deletions central/cluster/datastore/datastore.go
@@ -73,6 +73,8 @@ type DataStore interface {
SearchResults(ctx context.Context, q *v1.Query) ([]*v1.SearchResult, error)

LookupOrCreateClusterFromConfig(ctx context.Context, clusterID, bundleID string, hello *central.SensorHello) (*storage.Cluster, error)

MatchProcessIndicator(ctx context.Context, indicator *storage.ProcessIndicator) (bool, error)
}

// New returns an instance of DataStore.
@@ -120,6 +122,7 @@ func New(
clusterRanker: clusterRanker,
networkBaselineMgr: networkBaselineMgr,
idToNameCache: simplecache.New(),
idToRegexCache: simplecache.New(),
nameToIDCache: simplecache.New(),
compliancePruner: compliancePruner,
clusterInitStore: clusterInitStore,
42 changes: 38 additions & 4 deletions central/cluster/datastore/datastore_impl.go
@@ -3,6 +3,7 @@ package datastore
import (
"context"
"fmt"
"regexp"
"slices"
"strings"
"time"
@@ -34,7 +35,7 @@ import (
"github.com/stackrox/rox/generated/internalapi/central"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/pkg/centralsensor"
clusterValidation "github.com/stackrox/rox/pkg/cluster"
clusterPkg "github.com/stackrox/rox/pkg/cluster"
"github.com/stackrox/rox/pkg/concurrency"
"github.com/stackrox/rox/pkg/env"
"github.com/stackrox/rox/pkg/errox"
@@ -94,8 +95,9 @@ type datastoreImpl struct {
notifier notifierProcessor.Processor
clusterRanker *ranking.Ranker

idToNameCache simplecache.Cache
nameToIDCache simplecache.Cache
idToNameCache simplecache.Cache
idToRegexCache simplecache.Cache
nameToIDCache simplecache.Cache

lock sync.Mutex
}
@@ -179,6 +181,10 @@ func (ds *datastoreImpl) buildCache(ctx context.Context) error {

for _, c := range clusters {
ds.idToNameCache.Add(c.GetId(), c.GetName())
namespaceFilter := clusterPkg.GetNamespaceFilter(c)
log.Infof("Setting namespace filter for cluster %s (%s): %q", c.GetName(), c.GetId(), namespaceFilter)
ds.idToRegexCache.Add(c.GetId(),
Comment on lines +184 to +186
issue (bug_risk): Consider what regex is stored when no runtime data control is configured to avoid over-filtering

GetNamespaceFilter’s return value is compiled and cached here. If it returns an empty string, regexp.MustCompile("") matches all namespaces, and with MatchProcessIndicator’s “exclude on match” behavior this would suppress all indicators when runtime data control is unset/defaulted.

This logic therefore depends on GetNamespaceFilter returning a “match nothing” pattern (or similar) when no filtering is desired; otherwise the cache causes unintended global suppression tied to the default behavior of GetNamespaceFilter.
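A minimal sketch of the failure mode (the matchNothing pattern below is only an illustrative mitigation, not something this PR adds):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// An empty pattern compiles successfully and matches every input,
	// so with "exclude on match" semantics every namespace would be
	// suppressed.
	empty := regexp.MustCompile("")
	fmt.Println(empty.MatchString("payments")) // true

	// A never-matching pattern is a safer default when no filtering is
	// desired: `\z` anchors at end of input, so `\z.` can never match.
	matchNothing := regexp.MustCompile(`\z.`)
	fmt.Println(matchNothing.MatchString("payments")) // false
}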

regexp.MustCompile(namespaceFilter))
ds.nameToIDCache.Add(c.GetName(), c.GetId())
c.HealthStatus = clusterHealthStatuses[c.GetId()]
}
@@ -333,6 +339,29 @@ func (ds *datastoreImpl) GetClusterName(ctx context.Context, id string) (string,
return val.(string), true, nil
}

func (ds *datastoreImpl) MatchProcessIndicator(ctx context.Context,
indicator *storage.ProcessIndicator) (bool, error) {

id := indicator.GetClusterId()

if ok, err := clusterSAC.ReadAllowed(ctx, sac.ClusterScopeKey(id)); err != nil || !ok {
return false, err
}

filter, ok := ds.idToRegexCache.Get(id)
if !ok {
log.Debugf("No namespace filter found for cluster %s", id)
return false, nil
}

matched := filter.(*regexp.Regexp).MatchString(indicator.GetNamespace())
if matched {
log.Debugf("Process indicator namespace %q matched filter for cluster %s, excluding from persistence",
indicator.GetNamespace(), id)
}
return matched, nil
}

func (ds *datastoreImpl) Exists(ctx context.Context, id string) (bool, error) {
if ok, err := clusterSAC.ReadAllowed(ctx, sac.ClusterScopeKey(id)); err != nil || !ok {
return false, err
@@ -585,6 +614,7 @@ func (ds *datastoreImpl) RemoveCluster(ctx context.Context, id string, done *con
return errors.Wrapf(err, "failed to remove cluster %q", id)
}
ds.idToNameCache.Remove(id)
ds.idToRegexCache.Remove(id)
ds.nameToIDCache.Remove(cluster.GetName())

deleteRelatedCtx := sac.WithAllAccess(context.Background())
@@ -892,6 +922,10 @@ func (ds *datastoreImpl) updateClusterNoLock(ctx context.Context, cluster *stora
return err
}
ds.idToNameCache.Add(cluster.GetId(), cluster.GetName())
namespaceFilter := clusterPkg.GetNamespaceFilter(cluster)
log.Infof("Updating namespace filter for cluster %s (%s): %q", cluster.GetName(), cluster.GetId(), namespaceFilter)
ds.idToRegexCache.Add(cluster.GetId(),
regexp.MustCompile(namespaceFilter))
ds.nameToIDCache.Add(cluster.GetName(), cluster.GetId())
return nil
}
@@ -1097,7 +1131,7 @@ func normalizeCluster(cluster *storage.Cluster) error {
}

func validateInput(cluster *storage.Cluster) error {
return clusterValidation.Validate(cluster).ToError()
return clusterPkg.Validate(cluster).ToError()
}

// addDefaults enriches the provided non-nil cluster object with defaults for
72 changes: 72 additions & 0 deletions central/cluster/datastore/datastore_impl_test.go
@@ -2,6 +2,7 @@ package datastore

import (
"errors"
"regexp"
"testing"
"time"

@@ -26,6 +27,7 @@ import (
serviceAccountDataStoreMocks "github.com/stackrox/rox/central/serviceaccount/datastore/mocks"
"github.com/stackrox/rox/generated/internalapi/central"
"github.com/stackrox/rox/generated/storage"
clusterPkg "github.com/stackrox/rox/pkg/cluster"
"github.com/stackrox/rox/pkg/concurrency"
"github.com/stackrox/rox/pkg/features"
"github.com/stackrox/rox/pkg/fixtures/fixtureconsts"
@@ -129,6 +131,7 @@ func (s *clusterDataStoreTestSuite) SetupTest() {
networkBaselineMgr: s.networkBaselineMgr,
compliancePruner: s.compliancePruner,
idToNameCache: simplecache.New(),
idToRegexCache: simplecache.New(),
nameToIDCache: simplecache.New(),
}
}
@@ -826,3 +829,72 @@ func (s *clusterDataStoreTestSuite) TestGetClusterLabels() {
})
}
}

func (s *clusterDataStoreTestSuite) TestProcessMatching() {
clusterID := fixtureconsts.Cluster1
testCluster := &storage.Cluster{
Id: clusterID,
Name: "test",
ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART,
HelmConfig: &storage.CompleteClusterConfig{
DynamicConfig: &storage.DynamicClusterConfig{
RuntimeDataControl: &storage.DynamicClusterConfig_RuntimeDataControl{
NamespaceFilter: "test-.*",
Comment on lines +833 to +842
suggestion (testing): Consider broadening TestProcessMatching to cover clusters without runtimeDataControl and non-Helm-managed clusters

Currently this only exercises regex matching for a Helm-managed cluster with a non-nil RuntimeDataControl. To better cover MatchProcessIndicator and namespace filtering, please also add:

  • A case where DynamicConfig/RuntimeDataControl is nil, asserting the expected default behavior (e.g., false without error).
  • A case for a MANUAL-managed cluster to verify it uses the expected namespace filter source and that the cache path behaves correctly.

This will validate matching behavior across the main cluster management/configuration variants.

Suggested implementation:

func (s *clusterDataStoreTestSuite) TestProcessMatching() {
	ctx := context.Background()

	helmClusterID := fixtureconsts.Cluster1
	manualClusterID := fixtureconsts.Cluster2

	// Helm-managed cluster with non-nil RuntimeDataControl; Regex should match "test-.*" namespaces.
	helmCluster := &storage.Cluster{
		Id:        helmClusterID,
		Name:      "helm-cluster",
		ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART,
		HelmConfig: &storage.CompleteClusterConfig{
			DynamicConfig: &storage.DynamicClusterConfig{
				RuntimeDataControl: &storage.DynamicClusterConfig_RuntimeDataControl{
					NamespaceFilter: "test-.*",
					Persistence:     true,
				},
			},
		},
	}

	// Helm-managed cluster with nil DynamicConfig/RuntimeDataControl; should take the default path
	// and return "not matched" without error.
	nilRuntimeControlCluster := &storage.Cluster{
		Id:        uuid.NewV4().String(),
		Name:      "no-runtime-control",
		ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART,
		HelmConfig: &storage.CompleteClusterConfig{
			// DynamicConfig intentionally nil.
		},
	}

	// MANUAL-managed cluster to exercise non-Helm cache path and namespace filter source.
	// For these tests we rely on the default behavior (no RuntimeDataControl) and verify that
	// we still get a deterministic non-error result.
	manualCluster := &storage.Cluster{
		Id:        manualClusterID,
		Name:      "manual-cluster",
		ManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL,
		// No HelmConfig – this should force the datastore down the MANUAL-management path.
	}

	// Upsert clusters so MatchProcessIndicator can look them up.
	require.NoError(s.T(), s.datastore.UpsertCluster(ctx, helmCluster))
	require.NoError(s.T(), s.datastore.UpsertCluster(ctx, nilRuntimeControlCluster))
	require.NoError(s.T(), s.datastore.UpsertCluster(ctx, manualCluster))

	// Process indicators to exercise regex matching and default behavior.
	matchingIndicator := &storage.ProcessIndicator{
		ClusterId: helmClusterID,
		Namespace: "test-namespace",
		PodId:     "pod-1",
		Signal: &storage.ProcessSignal{
			ContainerId: "container-1",
			Name:        "nginx",
		},
	}

	nonMatchingIndicator := &storage.ProcessIndicator{
		ClusterId: helmClusterID,
		Namespace: "prod-namespace",
		PodId:     "pod-2",
		Signal: &storage.ProcessSignal{
			ContainerId: "container-2",
			Name:        "nginx",
		},
	}

	nilRuntimeControlIndicator := &storage.ProcessIndicator{
		ClusterId: nilRuntimeControlCluster.Id,
		Namespace: "any-namespace",
		PodId:     "pod-3",
		Signal: &storage.ProcessSignal{
			ContainerId: "container-3",
			Name:        "redis",
		},
	}

	manualClusterIndicator := &storage.ProcessIndicator{
		ClusterId: manualClusterID,
		Namespace: "manual-namespace",
		PodId:     "pod-4",
		Signal: &storage.ProcessSignal{
			ContainerId: "container-4",
			Name:        "busybox",
		},
	}

	s.Run("helm cluster with matching namespace filter", func() {
		matched, err := s.datastore.MatchProcessIndicator(ctx, matchingIndicator)
		require.NoError(s.T(), err)
		assert.True(s.T(), matched, "expected helm cluster regex to match namespace")
	})

	s.Run("helm cluster with non-matching namespace filter", func() {
		matched, err := s.datastore.MatchProcessIndicator(ctx, nonMatchingIndicator)
		require.NoError(s.T(), err)
		assert.False(s.T(), matched, "expected helm cluster regex NOT to match namespace")
	})

	s.Run("helm cluster with nil RuntimeDataControl", func() {
		matched, err := s.datastore.MatchProcessIndicator(ctx, nilRuntimeControlIndicator)
		require.NoError(s.T(), err, "expected default behavior without error when RuntimeDataControl is nil")
		assert.False(s.T(), matched, "expected default behavior to return not-matched when RuntimeDataControl is nil")
	})

	s.Run("manual-managed cluster", func() {
		// First call exercises initial cache population / lookup path.
		matched1, err := s.datastore.MatchProcessIndicator(ctx, manualClusterIndicator)
		require.NoError(s.T(), err)
		// We don't assert a specific match policy here, only that the behavior is consistent and non-erroring.
		// For the current implementation we expect "false" when no RuntimeDataControl is present.
		assert.False(s.T(), matched1, "expected manual-managed cluster to return not-matched by default")

		// Second call should hit any internal cache path, and must behave identically.
		matched2, err := s.datastore.MatchProcessIndicator(ctx, manualClusterIndicator)
		require.NoError(s.T(), err)
		assert.Equal(s.T(), matched1, matched2, "manual-managed cluster matching should be stable across cache hits")
	})

The edit assumes the following, which you may need to adjust to your existing code:

  1. The test suite already imports:

    • context (context.Background()),
    • github.com/stretchr/testify/require,
    • github.com/stretchr/testify/assert,
    • github.com/stackrox/rox/generated/storage,
    • github.com/stackrox/rox/pkg/fixtures/fixtureconsts,
    • and a UUID helper (github.com/stackrox/rox/pkg/uuid or similar) for uuid.NewV4().String().
      If any of these are missing or named differently, update the imports or replace the UUID generation with any existing helper.
  2. The datastore under test exposes:
    MatchProcessIndicator(ctx context.Context, pi *storage.ProcessIndicator) (bool, error)
    and UpsertCluster(ctx context.Context, c *storage.Cluster) error.
    If the signatures differ (e.g., additional parameters, cluster ID instead of looking it up from the indicator), adapt the calls accordingly.

  3. If your test suite uses a sac.WithAllAccess context instead of context.Background(), replace the context creation line with the appropriate helper to maintain consistency with other tests.

  4. Ensure that no remaining fragments of the original TestProcessMatching function remain below this replacement; the full body of the function should match the replacement shown above, terminated by a single closing brace } for the function.

Persistence: true,
},
},
},
}

ctx := sac.WithAllAccess(s.T().Context())

// Populate the regexp cache
s.datastore.idToRegexCache.Add(clusterID,
regexp.MustCompile(clusterPkg.GetNamespaceFilter(testCluster)))

indicator := &storage.ProcessIndicator{
Id: uuid.NewV4().String(),
DeploymentId: uuid.NewV4().String(),
ContainerName: uuid.NewV4().String(),
PodId: uuid.NewV4().String(),
ClusterId: clusterID,
Namespace: "test-namespace",
}

// A process with a matching namespace should match (and be excluded)
match, err := s.datastore.MatchProcessIndicator(ctx, indicator)
s.NoError(err)
assert.True(s.T(), match)

// We change the namespace so it no longer matches, and the process passes through
indicator.Namespace = "other-namespace"
match, err = s.datastore.MatchProcessIndicator(ctx, indicator)
s.NoError(err)
assert.False(s.T(), match)

// We change the namespace to match the openshift pattern,
// and ask to exclude it
indicator.Namespace = "openshift-test"
testCluster.HelmConfig.DynamicConfig.RuntimeDataControl.ExcludeOpenshift = true

// We need to update the cache to take the OpenShift exclusion into account
s.datastore.idToRegexCache.Add(clusterID,
regexp.MustCompile(clusterPkg.GetNamespaceFilter(testCluster)))

match, err = s.datastore.MatchProcessIndicator(ctx, indicator)
s.NoError(err)
assert.True(s.T(), match)

// No matter what the namespace looks like, if no persistence is requested,
// the process will match
indicator.Namespace = "something-completely-different"
testCluster.HelmConfig.DynamicConfig.RuntimeDataControl.Persistence = false

// We need to update the cache to take persistence into account
s.datastore.idToRegexCache.Add(clusterID,
regexp.MustCompile(clusterPkg.GetNamespaceFilter(testCluster)))

match, err = s.datastore.MatchProcessIndicator(ctx, indicator)
s.NoError(err)
assert.True(s.T(), match)
}
15 changes: 15 additions & 0 deletions central/cluster/datastore/mocks/datastore.go

Some generated files are not rendered by default.

15 changes: 12 additions & 3 deletions central/detection/lifecycle/manager_impl.go
@@ -128,7 +128,7 @@ func (m *managerImpl) buildIndicatorFilter() {
}

log.Infof("Cleaning up %d processes as a part of building process filter", len(processesToRemove))
if err := m.processesDataStore.RemoveProcessIndicators(ctx, processesToRemove); err != nil {
if err := m.processesDataStore.RemoveProcessIndicators(ctx, processesToRemove, processIndicatorDatastore.RemovalReasonProcessFilter); err != nil {
utils.Should(errors.Wrap(err, "error removing process indicators"))
}
log.Infof("Successfully cleaned up those %d processes", len(processesToRemove))
@@ -209,7 +209,8 @@ func (m *managerImpl) autoLockProcessBaselines(baselines []*storage.ProcessBasel
}
}

// Perhaps the cluster config should be kept in memory and calling the database should not be needed
// The lifecycle manager uses cachedStorage for the cluster datastore,
// so repeated calls are memoized
func (m *managerImpl) isAutoLockEnabledForCluster(clusterId string) bool {
if !features.AutoLockProcessBaselines.Enabled() {
return false
@@ -252,7 +253,15 @@ func (m *managerImpl) flushIndicatorQueue() {
if m.deletedDeploymentsCache.Contains(indicator.GetDeploymentId()) {
continue
}
indicatorSlice = append(indicatorSlice, indicator)

match, err := m.clusterDataStore.MatchProcessIndicator(lifecycleMgrCtx, indicator)
if err != nil {
log.Errorf("Cannot match indicator %+v: %v", indicator, err)
} else if !match {
indicatorSlice = append(indicatorSlice, indicator)
} else {
Comment on lines +257 to +262
🚨 suggestion (security): Reduce verbose logging of full process indicators in the hot path

This branch logs the full indicator struct at info level when filtered out. Given the high volume of process indicators, this can both inflate log size and leak sensitive runtime details. Please reduce this to debug level and/or log only key fields (e.g., cluster, namespace, deployment, exec path) instead of the full struct.

Suggested implementation:

		} else {
			log.Debugf("Process indicator filtered out (cluster=%s, namespace=%s, deployment=%s, execPath=%s)",
				indicator.GetClusterId(),
				indicator.GetNamespace(),
				indicator.GetDeploymentId(),
				indicator.GetSignal().GetExecFilePath())
		}

You may need to adjust the field accessors used in the debug log depending on the actual ProcessIndicator API in this codebase. For example:

  • If there is a GetDeploymentName() instead of GetDeploymentId(), switch to that.
  • If namespace or cluster are nested (e.g., indicator.GetPod().GetNamespace()), update the call chain accordingly.
  • If GetSignal().GetExecFilePath() is not available, use the appropriate field that represents the executed path or process name.

log.Infof("Process Indicator doesn't match %+v", indicator)
}
}

// Index the process indicators in batch
54 changes: 54 additions & 0 deletions central/detection/lifecycle/manager_impl_test.go
@@ -7,16 +7,20 @@ import (

"github.com/pkg/errors"
clusterDataStoreMocks "github.com/stackrox/rox/central/cluster/datastore/mocks"
"github.com/stackrox/rox/central/deployment/cache"
queueMocks "github.com/stackrox/rox/central/deployment/queue/mocks"
alertManagerMocks "github.com/stackrox/rox/central/detection/alertmanager/mocks"
processBaselineDataStoreMocks "github.com/stackrox/rox/central/processbaseline/datastore/mocks"
processIndicatorsDataStoreMocks "github.com/stackrox/rox/central/processindicator/datastore/mocks"
piFilter "github.com/stackrox/rox/central/processindicator/filter"
reprocessorMocks "github.com/stackrox/rox/central/reprocessor/mocks"
connectionMocks "github.com/stackrox/rox/central/sensor/service/connection/mocks"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/pkg/env"
"github.com/stackrox/rox/pkg/features"
"github.com/stackrox/rox/pkg/fixtures"
"github.com/stackrox/rox/pkg/fixtures/fixtureconsts"
"github.com/stackrox/rox/pkg/process/filter"
"github.com/stackrox/rox/pkg/protoassert"
"github.com/stackrox/rox/pkg/protocompat"
"github.com/stackrox/rox/pkg/set"
@@ -82,6 +86,8 @@ type ManagerTestSuite struct {
mockCtrl *gomock.Controller
connectionManager *connectionMocks.MockManager
cluster *clusterDataStoreMocks.MockDataStore
indicators *processIndicatorsDataStoreMocks.MockDataStore
filter filter.Filter
}

func (suite *ManagerTestSuite) SetupTest() {
Expand All @@ -93,6 +99,8 @@ func (suite *ManagerTestSuite) SetupTest() {
suite.deploymentObservationQueue = queueMocks.NewMockDeploymentObservationQueue(suite.mockCtrl)
suite.connectionManager = connectionMocks.NewMockManager(suite.mockCtrl)
suite.cluster = clusterDataStoreMocks.NewMockDataStore(suite.mockCtrl)
suite.filter = piFilter.Singleton()
suite.indicators = processIndicatorsDataStoreMocks.NewMockDataStore(suite.mockCtrl)

suite.manager = &managerImpl{
baselines: suite.baselines,
@@ -101,6 +109,10 @@
deploymentObservationQueue: suite.deploymentObservationQueue,
connectionManager: suite.connectionManager,
clusterDataStore: suite.cluster,
processFilter: suite.filter,
processesDataStore: suite.indicators,
queuedIndicators: make(map[string]*storage.ProcessIndicator),
deletedDeploymentsCache: cache.DeletedDeploymentsSingleton(),
}
}

@@ -331,3 +343,45 @@ func (suite *ManagerTestSuite) TestAutoLockProcessBaselinesNoCluster() {
enabled := suite.manager.isAutoLockEnabledForCluster(clusterId)
suite.False(enabled)
}

func (suite *ManagerTestSuite) TestFlushIndicators() {
_, indicator1 := makeIndicator()
_, indicator2 := makeIndicator()

// Make the first indicator match and be filtered out
suite.cluster.EXPECT().MatchProcessIndicator(gomock.Any(), indicator1).
Return(true, nil)

// The second indicator should pass through
suite.cluster.EXPECT().MatchProcessIndicator(gomock.Any(), indicator2).
Comment on lines +347 to +356
suggestion (testing): Add tests for error and edge paths in flushIndicatorQueue (MatchProcessIndicator errors and empty queue)

Right now this only validates the happy path. Please also add tests for:

  • MatchProcessIndicator returning an error (e.g., mock returning (false, err)) to document whether that indicator is dropped or still persisted.
  • An empty queuedIndicators map (or all indicators filtered out) to confirm no AddProcessIndicators calls are made and no panics occur.

These will better cover the new filtering behavior in the lifecycle manager.

Suggested implementation:

func (suite *ManagerTestSuite) TestFlushIndicators() {
	_, indicator1 := makeIndicator()
	_, indicator2 := makeIndicator()

	// Queue both indicators, keyed by indicator ID as in the existing test.
	suite.manager.queuedIndicators[indicator1.GetId()] = indicator1
	suite.manager.queuedIndicators[indicator2.GetId()] = indicator2

	// Make first indicator to match and be filtered out.
	suite.cluster.EXPECT().MatchProcessIndicator(gomock.Any(), indicator1).
		Return(true, nil)

	// The second indicator should pass through.
	suite.cluster.EXPECT().MatchProcessIndicator(gomock.Any(), indicator2).
		Return(false, nil)

	// Only the second indicator should be persisted.
	suite.indicators.EXPECT().
		AddProcessIndicators(gomock.Any(), []*storage.ProcessIndicator{indicator2}).
		Return(nil)

	suite.manager.flushIndicatorQueue()
}

func (suite *ManagerTestSuite) TestFlushIndicators_MatchError() {
	_, indicator := makeIndicator()

	// Queue the indicator.
	suite.manager.queuedIndicators[indicator.GetId()] = indicator

	// Simulate a matching error for this indicator.
	suite.cluster.EXPECT().
		MatchProcessIndicator(gomock.Any(), indicator).
		Return(false, errors.New("match error"))

	// On error, the indicator should still be persisted so it isn't lost.
	suite.indicators.EXPECT().
		AddProcessIndicators(gomock.Any(), []*storage.ProcessIndicator{indicator}).
		Return(nil)

	suite.manager.flushIndicatorQueue()
}

func (suite *ManagerTestSuite) TestFlushIndicators_EmptyQueue() {
	// Start with an empty queue to exercise the edge case.
	suite.manager.queuedIndicators = make(map[string]*storage.ProcessIndicator)

	// No indicators should be written when the queue is empty.
	suite.indicators.EXPECT().
		AddProcessIndicators(gomock.Any(), gomock.Any()).
		Times(0)

	// Should not panic and should not attempt to persist indicators.
	suite.manager.flushIndicatorQueue()
}
  1. Ensure an errors package providing errors.New is available (the file already imports github.com/pkg/errors, which has New).
  2. If the queuedIndicators field on suite.manager can be nil before usage in some tests, make sure it is initialized in the test suite setup (e.g., in SetupTest / SetupSuite) to avoid nil map assignments.
  3. Adjust the expectation in TestFlushIndicators_MatchError if the actual, desired behavior for a MatchProcessIndicator error is to drop the indicator instead of persisting it:
    • In that case, change the AddProcessIndicators expectation to .Times(0) and update the test name/comment to reflect dropping-on-error semantics.

Return(false, nil)

// The second indicator will cause reading of the corresponding baseline...
suite.baselines.EXPECT().GetProcessBaseline(gomock.Any(), gomock.Any()).
Return(nil, false, nil)

// ... as well as new deployment in the observation queue ...
suite.deploymentObservationQueue.EXPECT().
InObservation(gomock.Any())

// ... and update of the corresponding baseline
suite.baselines.EXPECT().UpsertProcessBaseline(
gomock.Any(), gomock.Any(), gomock.Any(), true, true).
Return(nil, nil)

// Only the second indicator gets stored
suite.indicators.EXPECT().
AddProcessIndicators(gomock.Any(), []*storage.ProcessIndicator{indicator2})

// Unfortunately it's not easy to test new indicators in the lifecycle
// manager at the higher level, since flushIndicatorQueue is executed in a
// separate goroutine, which makes mock expectation checking complicated
// (we have to somehow wait for this routine to end). Thus we test at
// flushIndicatorQueue level, and manually prepare the indicator queue.
//
// TODO: Is it possible to incorporate testing/synctest here on top of testify?
suite.manager.queuedIndicators[indicator1.GetId()] = indicator1
suite.manager.queuedIndicators[indicator2.GetId()] = indicator2

suite.manager.flushIndicatorQueue()
}
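On the TODO above: a rough sketch of how Go 1.25's testing/synctest could wait for the flush goroutine deterministically. This is only an illustration under stated assumptions — mgr stands in for the lifecycle manager under test, and flushIndicatorQueue is assumed safe to call from a spawned goroutine:

import (
	"testing"
	"testing/synctest"
)

// Hypothetical test; mgr is a placeholder for the manager under test.
func TestFlushIndicatorQueueAsync(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		go mgr.flushIndicatorQueue() // the asynchronous flush under test
		// Wait blocks until every goroutine started inside the bubble is
		// durably blocked or has exited, so mock expectations can be
		// verified deterministically afterwards.
		synctest.Wait()
	})
}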