From df0ef25dd5cff0ce893208fa6d4e230504bd4fa4 Mon Sep 17 00:00:00 2001 From: Matthias Bertschy Date: Mon, 20 Nov 2023 17:31:19 +0100 Subject: [PATCH] more tests Signed-off-by: Matthias Bertschy --- adapters/backend/v1/adapter.go | 9 +- adapters/backend/v1/client.go | 14 +- adapters/backend/v1/pulsar.go | 2 +- adapters/mock.go | 2 +- adapters/mock_test.go | 568 +++++++++++++++++++++++++++++++++ config/config.go | 15 +- config/config_test.go | 204 ++++++++++++ configuration/clusterData.json | 23 ++ configuration/services.json | 11 + core/synchronizer.go | 12 +- domain/identifiers_test.go | 85 +++++ domain/utils.go | 5 - domain/utils_test.go | 27 ++ utils/cooldownqueue_test.go | 103 ++++++ utils/testdata/pod.json | 165 ++++++++++ utils/utils.go | 4 + utils/utils_test.go | 99 ++++++ 17 files changed, 1317 insertions(+), 31 deletions(-) create mode 100644 adapters/mock_test.go create mode 100644 config/config_test.go create mode 100644 configuration/clusterData.json create mode 100644 configuration/services.json create mode 100644 domain/identifiers_test.go create mode 100644 domain/utils_test.go create mode 100644 utils/cooldownqueue_test.go create mode 100644 utils/testdata/pod.json create mode 100644 utils/utils_test.go diff --git a/adapters/backend/v1/adapter.go b/adapters/backend/v1/adapter.go index 4275d0a..091e907 100644 --- a/adapters/backend/v1/adapter.go +++ b/adapters/backend/v1/adapter.go @@ -8,6 +8,7 @@ import ( "github.com/kubescape/synchronizer/adapters" "github.com/kubescape/synchronizer/domain" "github.com/kubescape/synchronizer/messaging" + "github.com/kubescape/synchronizer/utils" ) type Adapter struct { @@ -31,7 +32,7 @@ func NewBackendAdapter(mainContext context.Context, messageProducer messaging.Me var _ adapters.Adapter = (*Adapter)(nil) func (b *Adapter) getClient(ctx context.Context) (adapters.Client, error) { - id := domain.ClientIdentifierFromContext(ctx) + id := utils.ClientIdentifierFromContext(ctx) if client, ok := b.clientsMap.Load(id.String()); ok { return client.(adapters.Client), nil } @@ -39,7 +40,7 @@ func (b *Adapter) getClient(ctx context.Context) (adapters.Client, error) { } func (b *Adapter) Callbacks(ctx context.Context) (domain.Callbacks, error) { - id := domain.ClientIdentifierFromContext(ctx) + id := utils.ClientIdentifierFromContext(ctx) if callbacks, ok := b.callbacksMap.Load(id.String()); ok { return callbacks.(domain.Callbacks), nil } @@ -79,7 +80,7 @@ func (b *Adapter) PutObject(ctx context.Context, id domain.KindName, object []by } func (b *Adapter) RegisterCallbacks(ctx context.Context, callbacks domain.Callbacks) { - id := domain.ClientIdentifierFromContext(ctx) + id := utils.ClientIdentifierFromContext(ctx) b.callbacksMap.Store(id.String(), callbacks) } @@ -89,7 +90,7 @@ func (b *Adapter) Start(ctx context.Context) error { }) client := NewClient(b.producer) - id := domain.ClientIdentifierFromContext(ctx) + id := utils.ClientIdentifierFromContext(ctx) b.clientsMap.Store(id.String(), client) callbacks, err := b.Callbacks(ctx) if err != nil { diff --git a/adapters/backend/v1/client.go b/adapters/backend/v1/client.go index c3e1df9..5771ed7 100644 --- a/adapters/backend/v1/client.go +++ b/adapters/backend/v1/client.go @@ -39,7 +39,7 @@ func (c *Client) sendServerConnectedMessage(ctx context.Context) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - id := domain.ClientIdentifierFromContext(ctx) + id := utils.ClientIdentifierFromContext(ctx) msg := messaging.ServerConnectedMessage{ 
Cluster: id.Cluster, @@ -95,7 +95,7 @@ func (c *Client) PutObject(ctx context.Context, id domain.KindName, object []byt return c.sendPutObjectMessage(ctx, id, object) } -func (c *Client) RegisterCallbacks(mainCtx context.Context, callbacks domain.Callbacks) { +func (c *Client) RegisterCallbacks(_ context.Context, callbacks domain.Callbacks) { c.callbacks = callbacks } @@ -111,7 +111,7 @@ func (c *Client) VerifyObject(ctx context.Context, id domain.KindName, checksum func (c *Client) sendDeleteObjectMessage(ctx context.Context, id domain.KindName) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - cId := domain.ClientIdentifierFromContext(ctx) + cId := utils.ClientIdentifierFromContext(ctx) msg := messaging.DeleteObjectMessage{ Cluster: cId.Cluster, @@ -138,7 +138,7 @@ func (c *Client) sendDeleteObjectMessage(ctx context.Context, id domain.KindName func (c *Client) sendGetObjectMessage(ctx context.Context, id domain.KindName, baseObject []byte) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - cId := domain.ClientIdentifierFromContext(ctx) + cId := utils.ClientIdentifierFromContext(ctx) msg := messaging.GetObjectMessage{ BaseObject: baseObject, @@ -167,7 +167,7 @@ func (c *Client) sendGetObjectMessage(ctx context.Context, id domain.KindName, b func (c *Client) sendPatchObjectMessage(ctx context.Context, id domain.KindName, checksum string, patch []byte) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - cId := domain.ClientIdentifierFromContext(ctx) + cId := utils.ClientIdentifierFromContext(ctx) msg := messaging.PatchObjectMessage{ Checksum: checksum, @@ -198,7 +198,7 @@ func (c *Client) sendPatchObjectMessage(ctx context.Context, id domain.KindName, func (c *Client) sendPutObjectMessage(ctx context.Context, id domain.KindName, object []byte) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - cId := domain.ClientIdentifierFromContext(ctx) + cId := utils.ClientIdentifierFromContext(ctx) msg := messaging.PutObjectMessage{ Cluster: cId.Cluster, @@ -227,7 +227,7 @@ func (c *Client) sendPutObjectMessage(ctx context.Context, id domain.KindName, o func (c *Client) sendVerifyObjectMessage(ctx context.Context, id domain.KindName, checksum string) error { depth := ctx.Value(domain.ContextKeyDepth).(int) msgId := ctx.Value(domain.ContextKeyMsgId).(string) - cId := domain.ClientIdentifierFromContext(ctx) + cId := utils.ClientIdentifierFromContext(ctx) msg := messaging.VerifyObjectMessage{ Checksum: checksum, diff --git a/adapters/backend/v1/pulsar.go b/adapters/backend/v1/pulsar.go index 8884e3a..282d28e 100644 --- a/adapters/backend/v1/pulsar.go +++ b/adapters/backend/v1/pulsar.go @@ -226,7 +226,7 @@ type PulsarMessageProducer struct { func NewPulsarMessageProducer(cfg config.Config, pulsarClient pulsarconnector.Client) (*PulsarMessageProducer, error) { topic := cfg.Backend.Topic - fullTopic := pulsarconnector.BuildPersistentTopic(pulsarClient.GetConfig().Tenant, pulsarClient.GetConfig().Namespace, pulsarconnector.TopicName(topic)) + fullTopic := pulsarconnector.BuildPersistentTopic(pulsarClient.GetConfig().Tenant, pulsarClient.GetConfig().Namespace, topic) options := pulsar.ProducerOptions{ DisableBatching: true, diff --git a/adapters/mock.go b/adapters/mock.go index 7137ff7..9673082 100644 --- a/adapters/mock.go +++ b/adapters/mock.go @@ -98,7 +98,7 @@ func (m 
*MockAdapter) PutObject(_ context.Context, id domain.KindName, object [] return nil } -func (m *MockAdapter) RegisterCallbacks(mainCtx context.Context, callbacks domain.Callbacks) { +func (m *MockAdapter) RegisterCallbacks(_ context.Context, callbacks domain.Callbacks) { m.callbacks = callbacks } diff --git a/adapters/mock_test.go b/adapters/mock_test.go new file mode 100644 index 0000000..36b790c --- /dev/null +++ b/adapters/mock_test.go @@ -0,0 +1,568 @@ +package adapters + +import ( + "context" + "reflect" + "testing" + + "github.com/kubescape/synchronizer/domain" + "github.com/stretchr/testify/assert" +) + +var ( + kindDeployment = domain.KindName{ + Kind: domain.KindFromString("apps/v1/Deployment"), + Name: "name", + Namespace: "namespace", + } +) + +func TestCallDeleteObject(t *testing.T) { + tests := []struct { + name string + id domain.KindName + patchStrategy bool + want []domain.KindName + wantErr bool + }{ + { + name: "delete object", + id: kindDeployment, + patchStrategy: true, + want: []domain.KindName{kindDeployment}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.TODO() + deletedIDs := []domain.KindName{} + m := &MockAdapter{ + callbacks: domain.Callbacks{ + DeleteObject: func(ctx context.Context, id domain.KindName) error { + deletedIDs = append(deletedIDs, id) + return nil + }, + }, + patchStrategy: tt.patchStrategy, + Resources: map[string][]byte{}, + shadowObjects: map[string][]byte{}, + } + if err := m.TestCallDeleteObject(ctx, tt.id); (err != nil) != tt.wantErr { + t.Errorf("TestCallDeleteObject() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.want, deletedIDs) + }) + } +} + +func TestCallPutOrPatch(t *testing.T) { + tests := []struct { + name string + id domain.KindName + baseObject []byte + newObject []byte + patchStrategy bool + wantCheckSums []string + wantPatches [][]byte + wantPatched []domain.KindName + wantPut []domain.KindName + wantErr bool + }{ + { + name: "no patch strategy", + id: kindDeployment, + patchStrategy: false, + wantCheckSums: []string{}, + wantPatches: [][]byte{}, + wantPatched: []domain.KindName{}, + wantPut: []domain.KindName{kindDeployment}, + }, + { + name: "no base object", + id: kindDeployment, + patchStrategy: true, + wantCheckSums: []string{}, + wantPatches: [][]byte{}, + wantPatched: []domain.KindName{}, + wantPut: []domain.KindName{kindDeployment}, + }, + { + name: "base object -> patch", + id: kindDeployment, + baseObject: []byte(`{"metadata":{"name":"name"},"spec":{"replicas":1}}`), + newObject: []byte(`{"metadata":{"name":"name"},"spec":{"replicas":2}}`), + patchStrategy: true, + wantCheckSums: []string{"5b7a89928d3dbee97066bf5197895de61755d727a3d648f565d4d1b46716bb4a"}, + wantPatches: [][]byte{[]byte(`{"spec":{"replicas":2}}`)}, + wantPatched: []domain.KindName{kindDeployment}, + wantPut: []domain.KindName{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.TODO() + checksums := []string{} + patches := [][]byte{} + patchedIDs := []domain.KindName{} + putIDs := []domain.KindName{} + m := &MockAdapter{ + callbacks: domain.Callbacks{ + PatchObject: func(ctx context.Context, id domain.KindName, checksum string, patch []byte) error { + checksums = append(checksums, checksum) + patches = append(patches, patch) + patchedIDs = append(patchedIDs, id) + return nil + }, + PutObject: func(ctx context.Context, id domain.KindName, object []byte) error { + putIDs = append(putIDs, id) + return nil + }, + }, + patchStrategy: 
tt.patchStrategy, + Resources: map[string][]byte{}, + shadowObjects: map[string][]byte{}, + } + if err := m.TestCallPutOrPatch(ctx, tt.id, tt.baseObject, tt.newObject); (err != nil) != tt.wantErr { + t.Errorf("TestCallPutOrPatch() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.wantCheckSums, checksums) + assert.Equal(t, tt.wantPatches, patches) + assert.Equal(t, tt.wantPatched, patchedIDs) + assert.Equal(t, tt.wantPut, putIDs) + }) + } +} + +func TestCallVerifyObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + ctx context.Context + id domain.KindName + object []byte + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.TestCallVerifyObject(tt.args.ctx, tt.args.id, tt.args.object); (err != nil) != tt.wantErr { + t.Errorf("TestCallVerifyObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_Callbacks(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + in0 context.Context + } + tests := []struct { + name string + fields fields + args args + want domain.Callbacks + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + got, err := m.Callbacks(tt.args.in0) + if (err != nil) != tt.wantErr { + t.Errorf("Callbacks() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Callbacks() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMockAdapter_DeleteObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + in0 context.Context + id domain.KindName + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.DeleteObject(tt.args.in0, tt.args.id); (err != nil) != tt.wantErr { + t.Errorf("DeleteObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_GetObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + ctx context.Context + id domain.KindName + baseObject []byte + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.GetObject(tt.args.ctx, tt.args.id, tt.args.baseObject); (err != nil) != tt.wantErr { + t.Errorf("GetObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_PatchObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + ctx context.Context + id domain.KindName + checksum string + patch []byte + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.PatchObject(tt.args.ctx, tt.args.id, tt.args.checksum, tt.args.patch); (err != nil) != tt.wantErr { + t.Errorf("PatchObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_PutObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + in0 context.Context + id domain.KindName + object []byte + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.PutObject(tt.args.in0, tt.args.id, tt.args.object); (err != nil) != tt.wantErr { + t.Errorf("PutObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_RegisterCallbacks(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + in0 context.Context + callbacks domain.Callbacks + } + tests := []struct { + name string + fields fields + args args + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + m.RegisterCallbacks(tt.args.in0, tt.args.callbacks) + }) + } +} + +func TestMockAdapter_Start(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + in0 context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.Start(tt.args.in0); (err != nil) != tt.wantErr { + t.Errorf("Start() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_VerifyObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + ctx context.Context + id domain.KindName + newChecksum string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + if err := m.VerifyObject(tt.args.ctx, tt.args.id, tt.args.newChecksum); (err != nil) != tt.wantErr { + t.Errorf("VerifyObject() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestMockAdapter_patchObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + id domain.KindName + checksum string + patch []byte + } + tests := []struct { + name string + fields fields + args args + want []byte + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + got, err := m.patchObject(tt.args.id, tt.args.checksum, tt.args.patch) + if (err != nil) != tt.wantErr { + t.Errorf("patchObject() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("patchObject() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMockAdapter_verifyObject(t *testing.T) { + type fields struct { + callbacks domain.Callbacks + patchStrategy bool + Resources map[string][]byte + shadowObjects map[string][]byte + } + type args struct { + id domain.KindName + newChecksum string + } + tests := []struct { + name string + fields fields + args args + want []byte + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &MockAdapter{ + callbacks: tt.fields.callbacks, + patchStrategy: tt.fields.patchStrategy, + Resources: tt.fields.Resources, + shadowObjects: tt.fields.shadowObjects, + } + got, err := m.verifyObject(tt.args.id, tt.args.newChecksum) + if (err != nil) != tt.wantErr { + t.Errorf("verifyObject() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("verifyObject() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNewMockAdapter(t *testing.T) { + type args struct { + patchStrategy bool + } + tests := []struct { + name string + args args + want *MockAdapter + }{ + { + name: "client side", + args: args{ + patchStrategy: true, + }, + want: &MockAdapter{ + patchStrategy: true, + Resources: map[string][]byte{}, + shadowObjects: map[string][]byte{}, + }, + }, + { + name: "server side", + args: args{ + patchStrategy: false, + }, + want: &MockAdapter{ + patchStrategy: false, + Resources: map[string][]byte{}, + shadowObjects: map[string][]byte{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := NewMockAdapter(tt.args.patchStrategy) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/config/config.go b/config/config.go index a641987..42e71ec 100644 --- a/config/config.go +++ b/config/config.go @@ -55,22 +55,23 @@ func (r Resource) String() string { // LoadConfig reads configuration from file or environment variables. func LoadConfig(path string) (Config, error) { + v := viper.New() // singleton prevents running tests in parallel if configPathFromEnv := os.Getenv("CONFIG"); configPathFromEnv != "" { - viper.AddConfigPath(configPathFromEnv) + v.AddConfigPath(configPathFromEnv) } - viper.AddConfigPath(path) - viper.SetConfigName("config") - viper.SetConfigType("json") + v.AddConfigPath(path) + v.SetConfigName("config") + v.SetConfigType("json") - viper.AutomaticEnv() + v.AutomaticEnv() - err := viper.ReadInConfig() + err := v.ReadInConfig() if err != nil { return Config{}, err } var config Config - err = viper.Unmarshal(&config) + err = v.Unmarshal(&config) return config, err } diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 0000000..3310fa6 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,204 @@ +package config + +import ( + "os" + "testing" + + "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/utils-k8s-go/armometadata" + "github.com/kubescape/backend/pkg/servicediscovery/schema" + v2 "github.com/kubescape/backend/pkg/servicediscovery/v2" + pulsarconfig "github.com/kubescape/messaging/pulsar/config" + "github.com/kubescape/synchronizer/domain" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestLoadClusterConfig(t *testing.T) { + tests := []struct { + name string + env map[string]string + want armometadata.ClusterConfig + }{ + { + name: "cluster config", + env: map[string]string{"CLUSTER_CONFIG": "../configuration/clusterData.json"}, + want: armometadata.ClusterConfig{ + ClusterName: "kind", + GatewayWebsocketURL: "gateway:8001", + GatewayRestURL: "gateway:8002", + KubevulnURL: "kubevuln:8080", + KubescapeURL: "kubescape:8080", + InstallationData: armotypes.InstallationData{ + StorageEnabled: ptr.To[bool](true), + RelevantImageVulnerabilitiesEnabled: ptr.To[bool](false), + RelevantImageVulnerabilitiesConfiguration: "disable", + Namespace: "kubescape", + ImageVulnerabilitiesScanningEnabled: ptr.To[bool](false), + PostureScanEnabled: 
ptr.To[bool](false), + OtelCollectorEnabled: ptr.To[bool](true), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + err := os.Setenv(k, v) + assert.NoError(t, err) + } + got, err := LoadClusterConfig() + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestLoadConfig(t *testing.T) { + tests := []struct { + name string + path string + want Config + }{ + { + name: "client config", + path: "../configuration/client", + want: Config{ + InCluster: InCluster{ + ServerUrl: "ws://127.0.0.1:8080/", + ClusterName: "cluster-1", + Account: "11111111-2222-3333-4444-11111111", + AccessKey: "xxxxxxxx-1111-1111-1111-xxxxxxxx", + Resources: []Resource{ + {Group: "apps", Version: "v1", Resource: "deployments", Strategy: "patch"}, + {Group: "", Version: "v1", Resource: "pods", Strategy: "patch"}, + {Group: "spdx.softwarecomposition.kubescape.io", Version: "v1beta1", Resource: "sbomspdxv2p3s", Strategy: "copy"}, + {Group: "spdx.softwarecomposition.kubescape.io", Version: "v1beta1", Resource: "sbomspdxv2p3filtereds", Strategy: "copy"}, + }, + }, + }, + }, + { + name: "server config", + path: "../configuration/server", + want: Config{ + Backend: Backend{ + AuthenticationServer: &AuthenticationServerConfig{ + Url: "https://api.armosec.io/api/v1", + HeaderToQueryParamMapping: map[string]string{"x-api-account": "customerGUID"}, + HeaderToHeaderMapping: map[string]string{"x-api-key": "X-API-KEY"}, + }, + Subscription: "subscription", + PulsarConfig: &pulsarconfig.PulsarConfig{ + URL: "pulsar://localhost:6650", + Tenant: "kubescape", + Namespace: "kubescape", + AdminUrl: "http://localhost:8081", + Clusters: []string{"standalone"}, + RedeliveryDelaySeconds: 0, + MaxDeliveryAttempts: 2, + }, + Topic: "synchronizer", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := LoadConfig(tt.path) + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestLoadServiceURLs(t *testing.T) { + tests := []struct { + name string + env map[string]string + filePath string + want schema.IBackendServices + }{ + { + name: "via filePath", + filePath: "../configuration/services.json", + want: &v2.ServicesV2{ + EventReceiverHttpUrl: "https://er-test.com", + EventReceiverWebsocketUrl: "wss://er-test.com", + GatewayUrl: "https://gw.test.com", + ApiServerUrl: "https://api.test.com", + MetricsUrl: "https://metrics.test.com", + SynchronizerUrl: "wss://synchronizer.test.com", + }, + }, + { + name: "via env", + env: map[string]string{"SERVICES": "../configuration/services.json"}, + want: &v2.ServicesV2{ + EventReceiverHttpUrl: "https://er-test.com", + EventReceiverWebsocketUrl: "wss://er-test.com", + GatewayUrl: "https://gw.test.com", + ApiServerUrl: "https://api.test.com", + MetricsUrl: "https://metrics.test.com", + SynchronizerUrl: "wss://synchronizer.test.com", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for k, v := range tt.env { + err := os.Setenv(k, v) + assert.NoError(t, err) + } + got, err := LoadServiceURLs(tt.filePath) + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestResource_String(t *testing.T) { + type fields struct { + Group string + Version string + Resource string + Strategy domain.Strategy + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "deployments", + fields: fields{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + want: "apps/v1/deployments", + 
}, + { + name: "pods", + fields: fields{ + Group: "", + Version: "v1", + Resource: "pods", + }, + want: "/v1/pods", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := Resource{ + Group: tt.fields.Group, + Version: tt.fields.Version, + Resource: tt.fields.Resource, + Strategy: tt.fields.Strategy, + } + if got := r.String(); got != tt.want { + t.Errorf("String() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/configuration/clusterData.json b/configuration/clusterData.json new file mode 100644 index 0000000..a5f711c --- /dev/null +++ b/configuration/clusterData.json @@ -0,0 +1,23 @@ +{ + "serviceDiscovery": true, + "gatewayWebsocketURL": "gateway:8001", + "gatewayRestURL": "gateway:8002", + "vulnScanURL": "kubevuln:8080", + "kubevulnURL": "kubevuln:8080", + "kubescapeURL": "kubescape:8080", + "triggerNewImageScan": "false", + "clusterName": "kind", + "storage": true, + "relevantImageVulnerabilitiesEnabled": false, + "namespace": "kubescape", + "imageVulnerabilitiesScanningEnabled": false, + "postureScanEnabled": false, + "otelCollector": true, + "nodeAgent": "false", + "maxImageSize": 5.36870912e+09, + "keepLocal": false, + "scanTimeout": "5m", + "vexGeneration": false, + "continuousPostureScan": false, + "relevantImageVulnerabilitiesConfiguration": "disable" +} diff --git a/configuration/services.json b/configuration/services.json new file mode 100644 index 0000000..1050402 --- /dev/null +++ b/configuration/services.json @@ -0,0 +1,11 @@ +{ + "version": "v2", + "response": { + "event-receiver-http": "https://er-test.com", + "event-receiver-ws": "wss://er-test.com", + "gateway": "https://gw.test.com", + "api-server": "https://api.test.com", + "metrics": "https://metrics.test.com", + "synchronizer": "wss://synchronizer.test.com" + } +} diff --git a/core/synchronizer.go b/core/synchronizer.go index 5bee011..562932c 100644 --- a/core/synchronizer.go +++ b/core/synchronizer.go @@ -109,7 +109,7 @@ func (s *Synchronizer) VerifyObjectCallback(ctx context.Context, id domain.KindN } func (s *Synchronizer) Start(ctx context.Context) error { - identifiers := domain.ClientIdentifierFromContext(ctx) + identifiers := utils.ClientIdentifierFromContext(ctx) logger.L().Info("starting sync", helpers.String("account", identifiers.Account), helpers.String("cluster", identifiers.Cluster)) // adapter events err := s.adapter.Start(ctx) @@ -314,7 +314,7 @@ func (s *Synchronizer) sendGetObject(ctx context.Context, id domain.KindName, ba if err != nil { return fmt.Errorf("invoke outPool on get object message: %w", err) } - clientId := domain.ClientIdentifierFromContext(ctx) + clientId := utils.ClientIdentifierFromContext(ctx) logger.L().Debug("sent get object message", helpers.String("account", clientId.Account), helpers.String("cluster", clientId.Cluster), @@ -349,7 +349,7 @@ func (s *Synchronizer) sendNewChecksum(ctx context.Context, id domain.KindName, if msg.Kind == nil { return fmt.Errorf("invalid resource kind. 
name: %s", msg.Name) } - clientId := domain.ClientIdentifierFromContext(ctx) + clientId := utils.ClientIdentifierFromContext(ctx) logger.L().Debug("sent new checksum message", helpers.String("account", clientId.Account), helpers.String("cluster", clientId.Cluster), @@ -380,7 +380,7 @@ func (s *Synchronizer) sendObjectDeleted(ctx context.Context, id domain.KindName if err != nil { return fmt.Errorf("invoke outPool on delete message: %w", err) } - clientId := domain.ClientIdentifierFromContext(ctx) + clientId := utils.ClientIdentifierFromContext(ctx) logger.L().Debug("sent object deleted message", helpers.String("account", clientId.Account), helpers.String("cluster", clientId.Cluster), @@ -414,7 +414,7 @@ func (s *Synchronizer) sendPatchObject(ctx context.Context, id domain.KindName, return fmt.Errorf("invoke outPool on patch message: %w", err) } - clientId := domain.ClientIdentifierFromContext(ctx) + clientId := utils.ClientIdentifierFromContext(ctx) logger.L().Debug("sent patch object message", helpers.String("account", clientId.Account), @@ -449,7 +449,7 @@ func (s *Synchronizer) sendPutObject(ctx context.Context, id domain.KindName, ob return fmt.Errorf("invoke outPool on put object message: %w", err) } - clientId := domain.ClientIdentifierFromContext(ctx) + clientId := utils.ClientIdentifierFromContext(ctx) logger.L().Debug("sent put object message", helpers.String("account", clientId.Account), helpers.String("cluster", clientId.Cluster), diff --git a/domain/identifiers_test.go b/domain/identifiers_test.go new file mode 100644 index 0000000..5cdb7e3 --- /dev/null +++ b/domain/identifiers_test.go @@ -0,0 +1,85 @@ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClientIdentifier_String(t *testing.T) { + type fields struct { + Account string + Cluster string + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "account and cluster", + fields: fields{ + Account: "account", + Cluster: "cluster", + }, + want: "account/cluster", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := ClientIdentifier{ + Account: tt.fields.Account, + Cluster: tt.fields.Cluster, + } + got := c.String() + assert.Equal(t, tt.want, got) + }) + } +} + +func TestKindName_String(t *testing.T) { + type fields struct { + Kind *Kind + Name string + Namespace string + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "kind, name and namespace", + fields: fields{ + Kind: &Kind{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + Name: "name", + Namespace: "namespace", + }, + want: "apps/v1/deployments/namespace/name", + }, + { + name: "empty kind", + fields: fields{ + Name: "name", + Namespace: "namespace", + }, + want: "/namespace/name", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := KindName{ + Kind: tt.fields.Kind, + Name: tt.fields.Name, + Namespace: tt.fields.Namespace, + } + if got := c.String(); got != tt.want { + t.Errorf("String() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/domain/utils.go b/domain/utils.go index fb4b6a9..0db033e 100644 --- a/domain/utils.go +++ b/domain/utils.go @@ -1,7 +1,6 @@ package domain import ( - "context" "strings" "github.com/kubescape/go-logger" @@ -25,7 +24,3 @@ func KindFromString(kind string) *Kind { Resource: parts[2], } } - -func ClientIdentifierFromContext(ctx context.Context) ClientIdentifier { - return ctx.Value(ContextKeyClientIdentifier).(ClientIdentifier) -} diff --git 
a/domain/utils_test.go b/domain/utils_test.go new file mode 100644 index 0000000..1c51776 --- /dev/null +++ b/domain/utils_test.go @@ -0,0 +1,27 @@ +package domain + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKind_RoundTrip(t *testing.T) { + tests := []struct { + name string + }{ + { + name: "apps/v1/deployments", + }, + { + name: "/v1/pods", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + k := KindFromString(tt.name) + got := k.String() + assert.Equal(t, tt.name, got) + }) + } +} diff --git a/utils/cooldownqueue_test.go b/utils/cooldownqueue_test.go new file mode 100644 index 0000000..904c954 --- /dev/null +++ b/utils/cooldownqueue_test.go @@ -0,0 +1,103 @@ +package utils + +import ( + "reflect" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru/v2/expirable" + "k8s.io/apimachinery/pkg/watch" +) + +func TestCooldownQueue_Enqueue(t *testing.T) { + type fields struct { + seenEvents *lru.LRU[string, bool] + innerChan chan watch.Event + ResultChan <-chan watch.Event + } + type args struct { + e watch.Event + } + tests := []struct { + name string + fields fields + args args + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + q := &CooldownQueue{ + seenEvents: tt.fields.seenEvents, + innerChan: tt.fields.innerChan, + ResultChan: tt.fields.ResultChan, + } + q.Enqueue(tt.args.e) + }) + } +} + +func TestCooldownQueue_Stop(t *testing.T) { + type fields struct { + seenEvents *lru.LRU[string, bool] + innerChan chan watch.Event + ResultChan <-chan watch.Event + } + tests := []struct { + name string + fields fields + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + q := &CooldownQueue{ + seenEvents: tt.fields.seenEvents, + innerChan: tt.fields.innerChan, + ResultChan: tt.fields.ResultChan, + } + q.Stop() + }) + } +} + +func TestNewCooldownQueue(t *testing.T) { + type args struct { + size int + cooldown time.Duration + } + tests := []struct { + name string + args args + want *CooldownQueue + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewCooldownQueue(tt.args.size, tt.args.cooldown); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewCooldownQueue() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_makeEventKey(t *testing.T) { + type args struct { + e watch.Event + } + tests := []struct { + name string + args args + want string + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := makeEventKey(tt.args.e); got != tt.want { + t.Errorf("makeEventKey() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/utils/testdata/pod.json b/utils/testdata/pod.json new file mode 100644 index 0000000..936fe1d --- /dev/null +++ b/utils/testdata/pod.json @@ -0,0 +1,165 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": "2023-11-15T15:19:53Z", + "generateName": "nginx-748c667d99-", + "labels": { + "app": "nginx", + "pod-template-hash": "748c667d99" + }, + "name": "nginx-748c667d99-6cw4b", + "namespace": "default", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ReplicaSet", + "name": "nginx-748c667d99", + "uid": "43aeb5db-771f-4483-9998-9ef1e2eed2ee" + } + ], + "resourceVersion": "129152", + "uid": "aa5e3e8f-2da5-4c38-93c0-210d3280d10f" + }, + "spec": { + "containers": [ + { + "image": "nginx", + "imagePullPolicy": "Always", + "name": "nginx", + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "kube-api-access-fszp8", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "nodeName": "kind-control-plane", + "preemptionPolicy": "PreemptLowerPriority", + "priority": 0, + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "kube-api-access-fszp8", + "projected": { + "defaultMode": 420, + "sources": [ + { + "serviceAccountToken": { + "expirationSeconds": 3607, + "path": "token" + } + }, + { + "configMap": { + "items": [ + { + "key": "ca.crt", + "path": "ca.crt" + } + ], + "name": "kube-root-ca.crt" + } + }, + { + "downwardAPI": { + "items": [ + { + "fieldRef": { + "apiVersion": "v1", + "fieldPath": "metadata.namespace" + }, + "path": "namespace" + } + ] + } + } + ] + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2023-11-15T15:19:53Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2023-11-15T15:19:55Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2023-11-15T15:19:55Z", + "status": "True", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2023-11-15T15:19:53Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "containerd://1bc63d7b22a02c347be84adb772c6991eaa25d56c252d21c8553a1b79ad515c1", + "image": "docker.io/library/nginx:latest", + "imageID": "docker.io/library/nginx@sha256:86e53c4c16a6a276b204b0fd3a8143d86547c967dc8258b3d47c3a21bb68d3c6", + "lastState": {}, + "name": "nginx", + "ready": true, + "restartCount": 0, + "started": true, + "state": { + "running": { + "startedAt": "2023-11-15T15:19:55Z" + } + } + } + ], + "hostIP": "172.18.0.2", + "phase": "Running", + "podIP": "10.244.0.41", + "podIPs": [ + { + "ip": 
"10.244.0.41" + } + ], + "qosClass": "BestEffort", + "startTime": "2023-11-15T15:19:53Z" + } +} diff --git a/utils/utils.go b/utils/utils.go index 0e0e2e9..1753799 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -55,6 +55,10 @@ func ContextFromIdentifiers(parent context.Context, id domain.ClientIdentifier) }) } +func ClientIdentifierFromContext(ctx context.Context) domain.ClientIdentifier { + return ctx.Value(domain.ContextKeyClientIdentifier).(domain.ClientIdentifier) +} + //goland:noinspection GoUnusedExportedFunction func CompareJson(a, b []byte) bool { var aData interface{} diff --git a/utils/utils_test.go b/utils/utils_test.go new file mode 100644 index 0000000..5a22680 --- /dev/null +++ b/utils/utils_test.go @@ -0,0 +1,99 @@ +package utils + +import ( + "context" + "os" + "testing" + + "github.com/kubescape/synchronizer/domain" + "github.com/stretchr/testify/assert" +) + +func fileContent(path string) []byte { + b, _ := os.ReadFile(path) + return b +} + +func TestCanonicalHash(t *testing.T) { + tests := []struct { + name string + in []byte + want string + wantErr bool + }{ + { + name: "error", + in: []byte("test"), + wantErr: true, + }, + { + name: "empty", + in: []byte("{}"), + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "simple", + in: []byte(`{"a":"b"}`), + want: "baf4fd048ca2e8f75d531af13c5869eaa8e38c3020e1dfcebe3c3ac019a3bab2", + }, + { + name: "pod", + in: fileContent("testdata/pod.json"), + want: "1ae52b23166388144c602360fb73dd68736e88943f6e16fab1bf07347484f8e8", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := CanonicalHash(tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("CanonicalHash() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestContextFromGeneric(t *testing.T) { + got := ContextFromGeneric(context.TODO(), domain.Generic{}) + assert.Equal(t, 0, got.Value(domain.ContextKeyDepth)) + assert.NotNil(t, got.Value(domain.ContextKeyMsgId)) +} + +func TestClientIdentifier_RoundTrip(t *testing.T) { + tests := []struct { + name string + id domain.ClientIdentifier + }{ + { + name: "empty", + id: domain.ClientIdentifier{}, + }, + { + name: "with account", + id: domain.ClientIdentifier{ + Account: "account", + }, + }, + { + name: "with cluster", + id: domain.ClientIdentifier{ + Cluster: "cluster", + }, + }, + { + name: "with account and cluster", + id: domain.ClientIdentifier{ + Account: "account", + Cluster: "cluster", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := ContextFromIdentifiers(context.TODO(), tt.id) + got := ClientIdentifierFromContext(ctx) + assert.Equal(t, tt.id, got) + }) + } +}