From 4a51ed92fdc3c521bae2dedefe4f80591789aa3b Mon Sep 17 00:00:00 2001 From: Craig Ingram Date: Mon, 2 Jun 2025 11:49:46 -0400 Subject: [PATCH 1/4] fixed lint issue with caching --- .github/workflows/pull_request.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 629c7491..36795308 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -26,9 +26,7 @@ jobs: uses: golangci/golangci-lint-action@v8 with: args: --timeout=3m --enable copyloopvar --out-format colored-line-number - skip-pkg-cache: true - skip-build-cache: true - + - name: Run integration tests run: make test From b94a8b7ae5c65fd0391e3fc185854f95f4ba6afa Mon Sep 17 00:00:00 2001 From: Craig Ingram Date: Mon, 2 Jun 2025 12:03:21 -0400 Subject: [PATCH 2/4] added version to linter to try to get latest --- .github/workflows/pull_request.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 36795308..655aadbe 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -25,6 +25,7 @@ jobs: - name: Run golangci-lint uses: golangci/golangci-lint-action@v8 with: + version: latest args: --timeout=3m --enable copyloopvar --out-format colored-line-number - name: Run integration tests From f3c7c8ea0f7b4882ac2c6f6cf182d95756cb6894 Mon Sep 17 00:00:00 2001 From: Craig Ingram Date: Mon, 2 Jun 2025 12:27:17 -0400 Subject: [PATCH 3/4] removed out-format as it was removed --- .github/workflows/pull_request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 655aadbe..7a0b0cec 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -26,7 +26,7 @@ jobs: uses: golangci/golangci-lint-action@v8 with: version: latest - args: --timeout=3m --enable copyloopvar --out-format colored-line-number + args: --timeout=3m --enable copyloopvar - name: Run integration tests run: make test From 7dca7d959e5b489de3d2176f9835d20874c7c4e1 Mon Sep 17 00:00:00 2001 From: Craig Ingram Date: Mon, 2 Jun 2025 15:43:43 -0400 Subject: [PATCH 4/4] Fixing lint issues --- pkg/redis/close_helpers.go | 58 +++++++++++++++ pkg/redis/connections.go | 10 +-- pkg/redis/const.go | 2 +- pkg/redis/fake/redis.go | 6 +- pkg/redis/fake/redis_test.go | 11 ++- pkg/redisnode/node.go | 24 +++++-- pkg/redisnode/node_test.go | 30 ++++++-- pkg/redisnode/redisnode.go | 38 +++++++--- pkg/redisnode/redisnode_test.go | 6 +- test/e2e/framework/logger.go | 4 +- test/e2e/operator.go | 124 ++++++++++++++++---------------- 11 files changed, 218 insertions(+), 95 deletions(-) create mode 100644 pkg/redis/close_helpers.go diff --git a/pkg/redis/close_helpers.go b/pkg/redis/close_helpers.go new file mode 100644 index 00000000..8bbbabb1 --- /dev/null +++ b/pkg/redis/close_helpers.go @@ -0,0 +1,58 @@ +package redis + +import ( + "io" + "testing" + + "github.com/golang/glog" +) + +// CloseWithLog safely closes a Closer and logs any error that occurs. +// This is a helper to reduce boilerplate error handling for Close() calls. +func CloseWithLog(closer io.Closer, context string) { + if err := closer.Close(); err != nil { + glog.Errorf("Error closing %s: %v", context, err) + } +} + +// CloseWithLogf safely closes a Closer and logs any error using formatted context. +// This is a helper to reduce boilerplate error handling for Close() calls. 
+func CloseWithLogf(closer io.Closer, format string, args ...interface{}) { + if err := closer.Close(); err != nil { + glog.Errorf("Error closing "+format+": %v", append(args, err)...) + } +} + +// DeferCloseWithLog returns a function that safely closes a Closer and logs any error. +// This is designed to be used with defer statements. +func DeferCloseWithLog(closer io.Closer, context string) func() { + return func() { + CloseWithLog(closer, context) + } +} + +// DeferCloseWithLogf returns a function that safely closes a Closer and logs any error with formatted context. +// This is designed to be used with defer statements. +func DeferCloseWithLogf(closer io.Closer, format string, args ...interface{}) func() { + return func() { + CloseWithLogf(closer, format, args...) + } +} + +// CloseWithTestLog safely closes a Closer and logs any error using testing.T. +// This is specifically for test contexts where glog might not be appropriate. +func CloseWithTestLog(t *testing.T, closer io.Closer, context string) { + if err := closer.Close(); err != nil { + if t != nil { + t.Logf("Error closing %s: %v", context, err) + } + } +} + +// DeferCloseWithTestLog returns a function that safely closes a Closer and logs any error using testing.T. +// This is designed to be used with defer statements in tests. +func DeferCloseWithTestLog(t *testing.T, closer io.Closer, context string) func() { + return func() { + CloseWithTestLog(t, closer, context) + } +} diff --git a/pkg/redis/connections.go b/pkg/redis/connections.go index 286113cb..dd38501c 100644 --- a/pkg/redis/connections.go +++ b/pkg/redis/connections.go @@ -84,7 +84,7 @@ func NewAdminConnections(ctx context.Context, addrs []string, options *AdminOpti // Close used to close all possible resources instantiated by the Connections func (cnx *AdminConnections) Close() { for _, c := range cnx.clients { - c.Close() + CloseWithLog(c, "client connection") } } @@ -98,7 +98,7 @@ func (cnx *AdminConnections) Add(ctx context.Context, addr string) error { // Remove disconnect and remove the client connection from the map func (cnx *AdminConnections) Remove(addr string) { if c, ok := cnx.clients[addr]; ok { - c.Close() + CloseWithLogf(c, "connection to %s", addr) delete(cnx.clients, addr) } } @@ -108,7 +108,7 @@ func (cnx *AdminConnections) Remove(addr string) { func (cnx *AdminConnections) Update(ctx context.Context, addr string) (ClientInterface, error) { // if already exist close the current connection if c, ok := cnx.clients[addr]; ok { - c.Close() + CloseWithLogf(c, "existing connection to %s", addr) } c, err := cnx.connect(ctx, addr) @@ -206,7 +206,7 @@ func (cnx *AdminConnections) ReplaceAll(ctx context.Context, addrs []string) { // Reset close all connections and clear the connection map func (cnx *AdminConnections) Reset() { for _, c := range cnx.clients { - c.Close() + CloseWithLog(c, "client connection") } cnx.clients = map[string]ClientInterface{} } @@ -288,7 +288,7 @@ func buildCommandReplaceMapping(filePath string) map[string]string { glog.Errorf("Cannot open %s: %v", filePath, err) return mapping } - defer file.Close() + defer DeferCloseWithLogf(file, "file %s", filePath)() scanner := bufio.NewScanner(file) for scanner.Scan() { diff --git a/pkg/redis/const.go b/pkg/redis/const.go index fac25652..c6cad110 100644 --- a/pkg/redis/const.go +++ b/pkg/redis/const.go @@ -24,5 +24,5 @@ const ( // Redis error constants const ( // ErrNotFound cannot find a node to connect to - ErrNotFound = "Unable to find a node to connect" + ErrNotFound = "unable to find a 
node to connect" ) diff --git a/pkg/redis/fake/redis.go b/pkg/redis/fake/redis.go index 7d3063f1..cff9d5f4 100644 --- a/pkg/redis/fake/redis.go +++ b/pkg/redis/fake/redis.go @@ -11,6 +11,8 @@ import ( "strings" "sync" "testing" + + "github.com/IBM/operator-for-redis-cluster/pkg/redis" ) // RedisServer Fake Redis Server struct @@ -43,7 +45,7 @@ func NewRedisServer(t *testing.T) *RedisServer { // Close possible resources func (r *RedisServer) Close() { - r.Ln.Close() + redis.CloseWithTestLog(r.test, r.Ln, "fake redis server listener") } // GetHostPort return the host port of redis server @@ -87,7 +89,7 @@ func (r *RedisServer) handleConnection() { break } } - defer conn.Close() + defer redis.DeferCloseWithTestLog(r.test, conn, "connection")() var wait sync.WaitGroup wait.Add(1) go func(conn net.Conn, wait *sync.WaitGroup) { diff --git a/pkg/redis/fake/redis_test.go b/pkg/redis/fake/redis_test.go index 3be7e0fb..dd54c715 100644 --- a/pkg/redis/fake/redis_test.go +++ b/pkg/redis/fake/redis_test.go @@ -114,7 +114,11 @@ func TestNewRedisServer(t *testing.T) { if err != nil { t.Errorf("Cannot connec to fake redis server: %v", err) } - defer conn.Close() + defer func() { + if err := conn.Close(); err != nil { + t.Logf("Error closing connection: %v", err) + } + }() testCases := []struct { input string @@ -127,7 +131,10 @@ func TestNewRedisServer(t *testing.T) { for i, tt := range testCases { // write to fake redis - fmt.Fprint(conn, tt.input) + if _, err := fmt.Fprint(conn, tt.input); err != nil { + t.Errorf("[test %d] Error writing to connection: %v", i, err) + continue + } //read from fake redis var message []string diff --git a/pkg/redisnode/node.go b/pkg/redisnode/node.go index 3edc651e..7c8185c2 100644 --- a/pkg/redisnode/node.go +++ b/pkg/redisnode/node.go @@ -128,7 +128,11 @@ func (n *Node) addSettingInConfigFile(line string) error { return fmt.Errorf("unable to set '%s' in config file, openfile error %s err:%v", line, n.config.Redis.ConfigFileName, err) } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + glog.Errorf("Error closing config file %s: %v", n.config.Redis.ConfigFileName, err) + } + }() _, err = f.WriteString(line + "\n") if err != nil { @@ -150,7 +154,11 @@ func (n *Node) getConfig(name string) (string, error) { return "", fmt.Errorf("unable to read a config from file, openfile error %s err:%v", configFile, err) } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + glog.Errorf("Error closing config file %s: %v", configFile, err) + } + }() scanner := bufio.NewScanner(f) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) @@ -216,7 +224,11 @@ func clearFolder(folder string) error { glog.Infof("Cannot access folder %s: %v", folder, err) return err } - defer d.Close() + defer func() { + if err := d.Close(); err != nil { + glog.Errorf("Error closing directory %s: %v", folder, err) + } + }() names, err := d.Readdirnames(-1) if err != nil { glog.Infof("Cannot read files in %s: %v", folder, err) @@ -243,7 +255,11 @@ func getPodMemoryLimit(memFilePath string) (uint64, error) { } return 0, err } - defer f.Close() + defer func() { + if err := f.Close(); err != nil { + glog.Errorf("Error closing memory limit file %s: %v", memFilePath, err) + } + }() memLimitStr, err := io.ReadAll(f) if err != nil { diff --git a/pkg/redisnode/node_test.go b/pkg/redisnode/node_test.go index fd930033..a9a1cfb8 100644 --- a/pkg/redisnode/node_test.go +++ b/pkg/redisnode/node_test.go @@ -72,20 +72,32 @@ cluster-node-timeout 321`, if createerr != nil { 
t.Errorf("Couldn' t create temporary config file: %v", createerr) } - defer os.RemoveAll(redisConfDir) - redisConfFile.Close() + defer func() { + if err := os.RemoveAll(redisConfDir); err != nil { + t.Logf("Error removing temp dir %s: %v", redisConfDir, err) + } + }() + if err := redisConfFile.Close(); err != nil { + t.Errorf("Error closing redis config file: %v", err) + } podInfoTempDir, _ := os.MkdirTemp("", "pod-info-test") memLimitFile, err := os.Create(filepath.Join(podInfoTempDir, "mem-limit")) if err != nil { t.Errorf("Couldn' t create temporary config file: %v", err) } - defer os.RemoveAll(podInfoTempDir) + defer func() { + if err := os.RemoveAll(podInfoTempDir); err != nil { + t.Logf("Error removing temp dir %s: %v", podInfoTempDir, err) + } + }() _, err = memLimitFile.Write([]byte(tc.podRequestLimit)) if err != nil { t.Errorf("Couldn't write to temporary config file: %v", err) } - memLimitFile.Close() + if err := memLimitFile.Close(); err != nil { + t.Errorf("Error closing memory limit file: %v", err) + } var additionalConfigFileNames []string additionalConfDir, _ := os.MkdirTemp("", "additional-redisconf") @@ -99,8 +111,14 @@ cluster-node-timeout 321`, if err != nil { t.Errorf("Couldn't write to temporary config file: %v", err) } - defer os.RemoveAll(redisConfDir) - configFile.Close() + defer func() { + if err := os.RemoveAll(redisConfDir); err != nil { + t.Logf("Error removing temp dir %s: %v", redisConfDir, err) + } + }() + if err := configFile.Close(); err != nil { + t.Errorf("Error closing config file: %v", err) + } } a := admin.NewFakeAdmin() diff --git a/pkg/redisnode/redisnode.go b/pkg/redisnode/redisnode.go index 7c67ad0c..3ec27597 100644 --- a/pkg/redisnode/redisnode.go +++ b/pkg/redisnode/redisnode.go @@ -269,29 +269,37 @@ func (r *RedisNode) configureHealth(ctx context.Context) error { func readinessCheck(ctx context.Context, addr string) error { client, rediserr := redis.NewClient(ctx, addr, time.Second, map[string]string{}) // will fail if node not accessible or slot range not set if rediserr != nil { - return fmt.Errorf("Readiness failed, err: %v", rediserr) + return fmt.Errorf("readiness failed, err: %v", rediserr) } - defer client.Close() + defer func() { + if err := client.Close(); err != nil { + glog.Errorf("Error closing redis client in readiness check: %v", err) + } + }() var resp radix.ClusterTopo err := client.DoCmd(ctx, &resp, "CLUSTER", "SLOTS") if err != nil { - return fmt.Errorf("Readiness failed, cluster slots response err: %v", err) + return fmt.Errorf("readiness failed, cluster slots response err: %v", err) } if len(resp) == 0 { - return fmt.Errorf("Readiness failed, cluster slots response empty") + return fmt.Errorf("readiness failed, cluster slots response empty") } - glog.V(6).Info("Readiness probe ok") + glog.V(6).Info("readiness probe ok") return nil } func livenessCheck(ctx context.Context, addr string) error { client, rediserr := redis.NewClient(ctx, addr, time.Second, map[string]string{}) // will fail if node not accessible or slot range not set if rediserr != nil { - return fmt.Errorf("Liveness failed, err: %v", rediserr) + return fmt.Errorf("liveness failed, err: %v", rediserr) } - defer client.Close() - glog.V(6).Info("Liveness probe ok") + defer func() { + if err := client.Close(); err != nil { + glog.Errorf("Error closing redis client in liveness check: %v", err) + } + }() + glog.V(6).Info("liveness probe ok") return nil } @@ -349,14 +357,22 @@ func testAndWaitConnection(ctx context.Context, addr string, maxWait time.Durati 
time.Sleep(100 * time.Millisecond) continue } - defer client.Close() + defer func() { + if err := client.Close(); err != nil { + glog.Errorf("Error closing test connection: %v", err) + } + }() var resp string if err := client.Do(ctx, radix.Cmd(&resp, "PING")); err != nil { - client.Close() + if err := client.Close(); err != nil { + glog.Errorf("Error closing client after PING error: %v", err) + } time.Sleep(100 * time.Millisecond) continue } else if resp != "PONG" { - client.Close() + if err := client.Close(); err != nil { + glog.Errorf("Error closing client after wrong PING response: %v", err) + } time.Sleep(100 * time.Millisecond) continue } diff --git a/pkg/redisnode/redisnode_test.go b/pkg/redisnode/redisnode_test.go index 3c872f39..c227401e 100644 --- a/pkg/redisnode/redisnode_test.go +++ b/pkg/redisnode/redisnode_test.go @@ -118,7 +118,11 @@ func TestRedisInitializationAttach(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.Remove(tmpfile.Name()) // clean up + defer func() { + if err := os.Remove(tmpfile.Name()); err != nil { + t.Logf("Error removing temp file %s: %v", tmpfile.Name(), err) + } + }() // clean up c := &Config{ Redis: config.Redis{ServerPort: "1234", ConfigFileName: tmpfile.Name()}, diff --git a/test/e2e/framework/logger.go b/test/e2e/framework/logger.go index e81b56d2..9af7190e 100644 --- a/test/e2e/framework/logger.go +++ b/test/e2e/framework/logger.go @@ -12,7 +12,9 @@ func nowStamp() string { } func log(level string, format string, args ...interface{}) { - fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) + if _, err := fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...); err != nil { + ginkgo.Fail(fmt.Sprintf("failed to log: %v", err), 1) + } } // Logf logs in e2e framework diff --git a/test/e2e/operator.go b/test/e2e/operator.go index 1ee11318..223e5037 100644 --- a/test/e2e/operator.go +++ b/test/e2e/operator.go @@ -6,15 +6,15 @@ import ( api "k8s.io/api/core/v1" kclient "sigs.k8s.io/controller-runtime/pkg/client" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" rapi "github.com/IBM/operator-for-redis-cluster/api/v1alpha1" "github.com/IBM/operator-for-redis-cluster/test/e2e/framework" ) func deleteRedisCluster(kubeClient kclient.Client, cluster *rapi.RedisCluster) { - Expect(kubeClient.Delete(context.Background(), cluster)).To(Succeed()) + gomega.Expect(kubeClient.Delete(context.Background(), cluster)).To(gomega.Succeed()) } const ( @@ -28,31 +28,31 @@ var cluster *rapi.RedisCluster const clusterName = "cluster1" const clusterNs = api.NamespaceDefault -var _ = BeforeSuite(func() { +var _ = ginkgo.BeforeSuite(func() { kubeClient = framework.BuildAndSetClients() }) -var _ = AfterSuite(func() { +var _ = ginkgo.AfterSuite(func() { deleteRedisCluster(kubeClient, cluster) }) -var _ = Describe("RedisCluster CRUD operations", func() { - It("should create a RedisCluster", func() { +var _ = ginkgo.Describe("RedisCluster CRUD operations", func() { + ginkgo.It("should create a RedisCluster", func() { cluster = framework.NewRedisCluster(clusterName, clusterNs, framework.FrameworkContext.ImageTag, defaultPrimaries, defaultReplicas) - Eventually(framework.CreateRedisNodeServiceAccountFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.CreateRedisNodeServiceAccountFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.CreateRedisClusterFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.CreateRedisClusterFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.CreateRedisClusterConfigMapFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.CreateRedisClusterConfigMapFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsPodDisruptionBudgetCreatedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsPodDisruptionBudgetCreatedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "8m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "8m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(gomega.HaveOccurred()) }) - Context("a RedisCluster is created", func() { - It("should update RedisCluster server config", func() { + ginkgo.Context("a RedisCluster is created", func() { + ginkgo.It("should update RedisCluster server config", func() { newConfig := map[string]string{ "redis.yaml": ` maxmemory-policy: volatile-lfu @@ -65,103 +65,103 @@ maxmemory 4gb cluster-enabled yes lazyfree-lazy-expire yes`, } - Eventually(framework.UpdateRedisClusterConfigMapFunc(kubeClient, cluster, newConfig), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateRedisClusterConfigMapFunc(kubeClient, cluster, newConfig), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.GetConfigUpdateEventFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.GetConfigUpdateEventFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) - It("should update the RedisCluster", func() { + ginkgo.It("should update the 
RedisCluster", func() { newTag := "new" cluster = framework.NewRedisCluster(clusterName, clusterNs, newTag, defaultPrimaries, defaultReplicas) - Eventually(framework.UpdateRedisClusterFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateRedisClusterFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsPodSpecUpdatedFunc(kubeClient, cluster, newTag), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsPodSpecUpdatedFunc(kubeClient, cluster, newTag), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) - It("should scale up the RedisCluster", func() { + ginkgo.It("should scale up the RedisCluster", func() { nbPrimary := int32(4) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, nil), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, nil), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(gomega.HaveOccurred()) }) - Context("a RedisCluster is running", func() { - When("the number of primaries is reduced", func() { - It("should scale down the RedisCluster", func() { + ginkgo.Context("a RedisCluster is running", func() { + ginkgo.When("the number of primaries is reduced", func() { + ginkgo.It("should scale down the RedisCluster", func() { nbPrimary := int32(3) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, nil), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, nil), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of replicas is increased", func() { - It("should create additional replicas for each primary in the RedisCluster", func() { + ginkgo.When("the number of replicas is increased", func() { + ginkgo.It("should create additional replicas for each primary in the RedisCluster", func() { replicas := int32(2) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, nil, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, nil, &replicas), "5s", 
"1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of replicas is decreased", func() { - It("should delete replicas for each primary in the RedisCluster", func() { + ginkgo.When("the number of replicas is decreased", func() { + ginkgo.It("should delete replicas for each primary in the RedisCluster", func() { replicas := int32(1) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, nil, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, nil, &replicas), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "10s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of primaries is decreased and the number of replicas is increased", func() { - It("should scale down the primaries and create additional replicas in the RedisCluster", func() { + ginkgo.When("the number of primaries is decreased and the number of replicas is increased", func() { + ginkgo.It("should scale down the primaries and create additional replicas in the RedisCluster", func() { nbPrimary := int32(2) replicas := int32(2) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of primaries is increased and the number of replicas is decreased", func() { - It("should scale up the primaries and delete replicas in the RedisCluster", func() { + ginkgo.When("the number of primaries is increased and the number of replicas is decreased", func() { + ginkgo.It("should scale up the primaries and delete replicas in the RedisCluster", func() { nbPrimary := int32(3) replicas := int32(1) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + 
gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of primaries is increased and the number of replicas is increased", func() { - It("should scale up the primaries and create additional replicas in the RedisCluster", func() { + ginkgo.When("the number of primaries is increased and the number of replicas is increased", func() { + ginkgo.It("should scale up the primaries and create additional replicas in the RedisCluster", func() { nbPrimary := int32(4) replicas := int32(2) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) - When("the number of primaries is decreased and the number of replicas is decreased", func() { - It("should scale down the primaries and delete replicas in the RedisCluster", func() { + ginkgo.When("the number of primaries is decreased and the number of replicas is decreased", func() { + ginkgo.It("should scale down the primaries and delete replicas in the RedisCluster", func() { nbPrimary := int32(3) replicas := int32(1) - Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.UpdateConfigRedisClusterFunc(kubeClient, cluster, &nbPrimary, &replicas), "5s", "1s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.IsRedisClusterStartedFunc(kubeClient, cluster), "5m", "5s").ShouldNot(gomega.HaveOccurred()) - Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(HaveOccurred()) + gomega.Eventually(framework.ZonesBalancedFunc(kubeClient, cluster), "5s", "1s").ShouldNot(gomega.HaveOccurred()) }) }) })
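
Reviewer note (not part of the patch): a minimal usage sketch of the close helpers introduced in pkg/redis/close_helpers.go, shown outside the diff context. Only the redis.CloseWithLog and redis.DeferCloseWithLogf signatures and the import path come from the patch above; the dial/withDefer/inline helper names, the net.Conn value, and the address are illustrative assumptions.

    package main

    import (
    	"net"

    	"github.com/IBM/operator-for-redis-cluster/pkg/redis"
    )

    // dial is a stand-in for any call that returns an io.Closer (illustrative only).
    func dial(addr string) (net.Conn, error) {
    	return net.Dial("tcp", addr)
    }

    // withDefer shows the deferred form used throughout the diff, e.g. in connections.go:
    // the returned func() is invoked by defer, and any Close error is logged via glog.
    func withDefer(addr string) error {
    	conn, err := dial(addr)
    	if err != nil {
    		return err
    	}
    	// Replaces a bare `defer conn.Close()`, which the linter flags because the
    	// returned error is silently discarded.
    	defer redis.DeferCloseWithLogf(conn, "connection to %s", addr)()

    	// ... use conn ...
    	return nil
    }

    // inline shows the non-deferred form, as used in AdminConnections.Close and Reset.
    func inline(addr string) {
    	conn, err := dial(addr)
    	if err != nil {
    		return
    	}
    	redis.CloseWithLog(conn, "client connection")
    }

    func main() {
    	_ = withDefer("127.0.0.1:6379")
    	inline("127.0.0.1:6379")
    }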