diff --git a/.evergreen.yml b/.evergreen.yml index b4a558a78..99bda60b7 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -167,6 +167,7 @@ task_groups: - e2e_test_replica_set - e2e_test_replica_set_readiness_probe - e2e_test_replica_set_scale + - e2e_test_replica_set_scale_down - e2e_test_replica_set_change_version - e2e_test_feature_compatibility_version - e2e_test_feature_compatibility_version_upgrade @@ -279,6 +280,13 @@ tasks: vars: test: replica_set_scale + - name: e2e_test_replica_set_scale_down + exec_timeout_secs: 3600 + commands: + - func: run_e2e_test + vars: + test: replica_set_scale_down + - name: e2e_test_replica_set_change_version commands: - func: run_e2e_test diff --git a/agent/Dockerfile b/agent/Dockerfile index d7df0e3b5..2e614163d 100644 --- a/agent/Dockerfile +++ b/agent/Dockerfile @@ -19,9 +19,9 @@ RUN mkdir -p agent \ && chmod -R +r /var/lib/automation/config \ && rm agent/mongodb-agent.tar.gz \ && rm -r mongodb-mms-automation-agent-* - RUN mkdir -p /var/lib/mongodb-mms-automation/probes/ \ - && curl --retry 3 https://readinessprobe.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ +# && curl --retry 3 https://readinessprobe.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ + && curl --retry 3 https://readiness-probe-scale-test.s3.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ && chmod +x /var/lib/mongodb-mms-automation/probes/readinessprobe \ && mkdir -p /var/log/mongodb-mms-automation/ \ && chmod -R +wr /var/log/mongodb-mms-automation/ \ diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go index 21b1d113a..e5bef059c 100644 --- a/cmd/testrunner/main.go +++ b/cmd/testrunner/main.go @@ -271,7 +271,8 @@ func withTest(test string) func(obj runtime.Object) { "--kubeconfig", "/etc/config/kubeconfig", "--go-test-flags", - "-timeout=20m", + // TODO: allow this to be configurable per test, this is only large due to scale down test + "-timeout=60m", } } } diff --git a/deploy/crds/mongodb.com_mongodb_crd.yaml b/deploy/crds/mongodb.com_mongodb_crd.yaml index 402da921d..b7143e0e6 100644 --- a/deploy/crds/mongodb.com_mongodb_crd.yaml +++ b/deploy/crds/mongodb.com_mongodb_crd.yaml @@ -175,7 +175,9 @@ spec: status: description: MongoDBStatus defines the observed state of MongoDB properties: - members: + currentStatefulSetReplicas: + type: integer + currentMongoDBMembers: type: integer message: type: string @@ -183,10 +185,6 @@ spec: type: string phase: type: string - required: - - members - - mongoUri - - phase type: object type: object version: v1 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 5b4410a65..56f163405 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -31,7 +31,8 @@ spec: - name: OPERATOR_NAME value: "mongodb-kubernetes-operator" - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage MongoDB deployments - value: quay.io/mongodb/mongodb-agent:10.19.0.6562-1 + value: quay.io/mongodb/mongodb-agent-dev:scaledown +# value: quay.io/mongodb/mongodb-agent:10.19.0.6562-1 - name: VERSION_UPGRADE_HOOK_IMAGE value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2 - name: MONGODB_IMAGE diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index 493b77d0b..e87d46981 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -201,8 +201,11 @@ type AuthMode string type MongoDBStatus struct { 
MongoURI string `json:"mongoUri"` Phase Phase `json:"phase"` - Members int `json:"members"` - Message string `json:"message,omitempty"` + + CurrentStatefulSetReplicas int `json:"currentStatefulSetReplicas"` + CurrentMongoDBMembers int `json:"currentMongoDBMembers"` + + Message string `json:"message,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -220,16 +223,13 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } -func (m MongoDB) DesiredReplicas() int { - return m.Spec.Members -} - -func (m MongoDB) CurrentReplicas() int { - return m.Status.Members -} - -func (m *MongoDB) ReplicasThisReconciliation() int { - return scale.ReplicasThisReconciliation(m) +func (m MongoDB) AutomationConfigMembersThisReconciliation() int { + // determine the correct number of automation config replica set members + // based on our desired number, and our current number + return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ + desired: m.Spec.Members, + current: m.Status.CurrentMongoDBMembers, + }) } // MongoURI returns a mongo uri which can be used to connect to this deployment @@ -298,6 +298,30 @@ func (m MongoDB) GetFCV() string { return strings.Join(parts[:minorIndex+1], ".") } +func (m MongoDB) DesiredReplicas() int { + return m.Spec.Members +} + +func (m MongoDB) CurrentReplicas() int { + return m.Status.CurrentStatefulSetReplicas +} + +func (m *MongoDB) StatefulSetReplicasThisReconciliation() int { + return scale.ReplicasThisReconciliation(m) +} + +type automationConfigReplicasScaler struct { + current, desired int +} + +func (a automationConfigReplicasScaler) DesiredReplicas() int { + return a.desired +} + +func (a automationConfigReplicasScaler) CurrentReplicas() int { + return a.current +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MongoDBList contains a list of MongoDB diff --git a/pkg/controller/mongodb/build_statefulset_test.go b/pkg/controller/mongodb/build_statefulset_test.go index bc7cd81ca..83d151ec0 100644 --- a/pkg/controller/mongodb/build_statefulset_test.go +++ b/pkg/controller/mongodb/build_statefulset_test.go @@ -54,7 +54,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDB, sts *app assert.Equal(t, "agent-image", agentContainer.Image) probe := agentContainer.ReadinessProbe assert.True(t, reflect.DeepEqual(probes.New(defaultReadiness()), *probe)) - assert.Equal(t, int32(240), probe.FailureThreshold) + assert.Equal(t, probes.New(defaultReadiness()).FailureThreshold, probe.FailureThreshold) assert.Equal(t, int32(5), probe.InitialDelaySeconds) assert.Len(t, agentContainer.VolumeMounts, 4) diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index 909a02877..c0fcc9f66 100644 --- a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -1,9 +1,8 @@ package mongodb import ( - "time" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" "go.uber.org/zap" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -16,6 +15,7 @@ type severity string const ( Info severity = "INFO" + Debug severity = "DEBUG" Warn severity = "WARN" Error severity = "ERROR" None severity = "NONE" @@ -56,28 +56,9 @@ func (m mongoUriOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (m mongoUriOption) GetResult() (reconcile.Result, error) { - return okResult() + return result.OK() } 
-func (o *optionBuilder) withMembers(members int) *optionBuilder { - o.options = append(o.options, - membersOption{ - members: members, - }) - return o -} - -type membersOption struct { - members int -} - -func (m membersOption) ApplyOption(mdb *mdbv1.MongoDB) { - mdb.Status.Members = m.members -} - -func (m membersOption) GetResult() (reconcile.Result, error) { - return okResult() -} func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { o.options = append(o.options, phaseOption{ @@ -99,18 +80,35 @@ type messageOption struct { func (m messageOption) ApplyOption(mdb *mdbv1.MongoDB) { mdb.Status.Message = m.message.messageString if m.message.severityLevel == Error { - zap.S().Error(m.message) + zap.S().Error(m.message.messageString) } if m.message.severityLevel == Warn { - zap.S().Warn(m.message) + zap.S().Warn(m.message.messageString) } if m.message.severityLevel == Info { - zap.S().Info(m.message) + zap.S().Info(m.message.messageString) + } + if m.message.severityLevel == Debug { + zap.S().Debug(m.message.messageString) } } func (m messageOption) GetResult() (reconcile.Result, error) { - return okResult() + return result.OK() +} + +func (o *optionBuilder) withMongoDBMembers(members int) *optionBuilder { + o.options = append(o.options, mongoDBReplicasOption{ + mongoDBMembers: members, + }) + return o +} + +func (o *optionBuilder) withStatefulSetReplicas(members int) *optionBuilder { + o.options = append(o.options, statefulSetReplicasOption{ + replicas: members, + }) + return o } func (o *optionBuilder) withMessage(severityLevel severity, msg string) *optionBuilder { @@ -146,28 +144,37 @@ func (p phaseOption) ApplyOption(mdb *mdbv1.MongoDB) { func (p phaseOption) GetResult() (reconcile.Result, error) { if p.phase == mdbv1.Running { - return okResult() + return result.OK() } if p.phase == mdbv1.Pending { - return retryResult(p.retryAfter) + return result.Retry(p.retryAfter) } if p.phase == mdbv1.Failed { - return failedResult() + return result.Failed() } - return okResult() + return result.OK() +} + +type mongoDBReplicasOption struct { + mongoDBMembers int } -// helper functions which return reconciliation results which should be -// returned from the main reconciliation loop +func (a mongoDBReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { + mdb.Status.CurrentMongoDBMembers = a.mongoDBMembers +} + +func (a mongoDBReplicasOption) GetResult() (reconcile.Result, error) { + return result.OK() +} -func okResult() (reconcile.Result, error) { - return reconcile.Result{}, nil +type statefulSetReplicasOption struct { + replicas int } -func retryResult(after int) (reconcile.Result, error) { - return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil +func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { + mdb.Status.CurrentStatefulSetReplicas = s.replicas } -func failedResult() (reconcile.Result, error) { - return retryResult(0) +func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { + return result.OK() } diff --git a/pkg/controller/mongodb/mongodb_status_options_test.go b/pkg/controller/mongodb/mongodb_status_options_test.go index 757732159..f55ba0518 100644 --- a/pkg/controller/mongodb/mongodb_status_options_test.go +++ b/pkg/controller/mongodb/mongodb_status_options_test.go @@ -22,19 +22,6 @@ func TestMongoUriOption_ApplyOption(t *testing.T) { assert.Equal(t, "my-uri", mdb.Status.MongoURI, "Status should be updated") } -func TestMembersOption_ApplyOption(t *testing.T) { - mdb := newReplicaSet(3, 
"my-rs", "my-ns") - - opt := membersOption{ - members: 5, - } - - opt.ApplyOption(&mdb) - - assert.Equal(t, 3, mdb.Spec.Members, "Spec should remain unchanged") - assert.Equal(t, 5, mdb.Status.Members, "Status should be updated") -} - func TestOptionBuilder_RunningPhase(t *testing.T) { mdb := newReplicaSet(3, "my-rs", "my-ns") diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index c68a6eec8..f2840ac49 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -9,6 +9,8 @@ import ( "os" "strings" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" @@ -142,9 +144,6 @@ func add(mgr manager.Manager, r *ReplicaSetReconciler) error { return nil } -// blank assignment to verify that ReplicaSetReconciler implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReplicaSetReconciler{} - // ReplicaSetReconciler reconciles a MongoDB ReplicaSet type ReplicaSetReconciler struct { // This client, initialized using mgr.Client() above, is a split client @@ -172,19 +171,17 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue - return reconcile.Result{}, nil + return result.OK() } r.log.Errorf("Error reconciling MongoDB resource: %s", err) // Error reading the object - requeue the request. - return reconcile.Result{}, err + return result.Failed() } r.log = zap.S().With("ReplicaSet", request.NamespacedName) - r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status, - "desiredMembers", mdb.DesiredReplicas(), - "currentMembers", mdb.CurrentReplicas(), - ) + r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status) + r.log.Debug("Ensuring Automation Config for deployment") if err := r.ensureAutomationConfig(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). @@ -202,6 +199,31 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } + // if we're scaling down, we need to wait until the StatefulSet is at the + // desired number of replicas. Scaling down has to happen one member at a time + if scale.IsScalingDown(mdb) { + res, err := checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(r.client, r.client.Status(), mdb) + + if err != nil { + r.log.Errorf("Error checking if StatefulSet members have been removed from the automation config: %s", err) + return result.Failed() + } + + if result.ShouldRequeue(res, err) { + r.log.Debugf("The expected number of Stateful Set members for scale down are not yet ready, requeuing reconciliation") + return result.Retry(10) + } + } + + // at this stage we know we have successfully updated the automation config with the correct number of + // members and the stateful set has the expected number of ready replicas. 
We can update our status + // so we calculate these fields correctly going forward + if err := updateScalingStatus(r.client.Status(), mdb); err != nil { + r.log.Errorf("Failed updating the status of the MongoDB resource: %s", err) + return result.Failed() + } + + r.log.Debug("Validating TLS Config") isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { return status.Update(r.client.Status(), &mdb, @@ -227,24 +249,21 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } - currentSts := appsv1.StatefulSet{} - if err := r.client.Get(context.TODO(), mdb.NamespacedName(), ¤tSts); err != nil { - errMsg := err.Error() - if !apiErrors.IsNotFound(err) { - errMsg = fmt.Sprintf("error getting StatefulSet: %s", err) - } - return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Error, errMsg). - withFailedPhase(), + currentSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + if err != nil { + return status.Update(r.client.Status(), &mdb, + statusOptions(). + withMessage(Error, fmt.Sprintf("Error getting StatefulSet: %s", err)). + withFailedPhase(), ) - } r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", getUpdateStrategyType(mdb)) ready, err := r.isStatefulSetReady(mdb, ¤tSts) if err != nil { return status.Update(r.client.Status(), &mdb, - statusOptions().withMessage(Error, fmt.Sprintf("Error checking StatefulSet status: %s", err)). + statusOptions(). + withMessage(Error, fmt.Sprintf("Error checking StatefulSet status: %s", err)). withFailedPhase(), ) } @@ -252,7 +271,9 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if !ready { return status.Update(r.client.Status(), &mdb, statusOptions(). - withMembers(int(currentSts.Status.ReadyReplicas)). + // need to update the current replicas as they get ready so eventually the desired number becomes + // ready one at a time + withStatefulSetReplicas(int(currentSts.Status.ReadyReplicas)). withMessage(Info, fmt.Sprintf("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name)). withPendingPhase(10), ) @@ -262,6 +283,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). withFailedPhase(), ) @@ -276,6 +298,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error setting annotations: %s", err)). withFailedPhase(), ) @@ -284,33 +307,29 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.completeTLSRollout(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error completing TLS rollout: %s", err)). withFailedPhase(), ) } - r.log.Debug("Updating MongoDB Status") - if err := r.client.Get(context.TODO(), mdb.NamespacedName(), &mdb); err != nil { - return status.Update(r.client.Status(), &mdb, statusOptions(). 
- withMessage(Error, fmt.Sprintf("could not get get resource: %s", err)). - withFailedPhase(), - ) - } - if scale.IsStillScaling(mdb) { return status.Update(r.client.Status(), &mdb, statusOptions(). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withMembers(mdb.ReplicasThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). withPendingPhase(0), ) } - res, err := status.Update(r.client.Status(), &mdb, statusOptions(). - withMongoURI(mdb.MongoURI()). - withMembers(mdb.ReplicasThisReconciliation()). - withMessage(None, ""). - withRunningPhase(), + res, err := status.Update(r.client.Status(), &mdb, + statusOptions(). + withMongoURI(mdb.MongoURI()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). + withMessage(None, ""). + withRunningPhase(), ) if err != nil { @@ -360,7 +379,8 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := statefulset.IsReady(*existingStatefulSet, mdb.ReplicasThisReconciliation()) + isReady := statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) + if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ @@ -405,7 +425,7 @@ func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDB) erro return nil } -// setAnnotations updates the monogdb resource annotations by applying the provided annotations +// setAnnotations updates the mongodb resource annotations by applying the provided annotations // on top of the existing ones func (r ReplicaSetReconciler) setAnnotations(nsName types.NamespacedName, annotations map[string]string) error { mdb := mdbv1.MongoDB{} @@ -429,12 +449,13 @@ func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error { func buildAutomationConfig(mdb mdbv1.MongoDB, mdbVersionConfig automationconfig.MongoDbVersionConfig, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) { domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDNSName)) + zap.S().Debugw("AutomationConfigMembersThisReconciliation", "mdb.AutomationConfigMembersThisReconciliation()", mdb.AutomationConfigMembersThisReconciliation()) builder := automationconfig.NewBuilder(). SetTopology(automationconfig.ReplicaSetTopology). SetName(mdb.Name). SetDomain(domain). - SetMembers(mdb.Spec.Members). + SetMembers(mdb.AutomationConfigMembersThisReconciliation()). SetPreviousAutomationConfig(currentAc). SetMongoDBVersion(mdb.Spec.Version). SetFCV(mdb.GetFCV()). @@ -484,7 +505,7 @@ func versionManifestFromBytes(bytes []byte) (automationconfig.VersionManifest, e // buildService creates a Service that will be used for the Replica Set StatefulSet // that allows all the members of the STS to see each other. 
-// TODO: Make sure this Service is as minimal as posible, to not interfere with +// TODO: Make sure this Service is as minimal as possible, to not interfere with // future implementations and Service Discovery mechanisms we might implement. func buildService(mdb mdbv1.MongoDB) corev1.Service { label := make(map[string]string) @@ -717,7 +738,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(mdb.Spec.Members), + statefulset.WithReplicas(mdb.StatefulSetReplicasThisReconciliation()), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( @@ -756,7 +777,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(240), + probes.WithFailureThreshold(60), // TODO: this value needs further consideration probes.WithInitialDelaySeconds(5), ) } diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index bfa373045..ee846a537 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -412,7 +412,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 5, mdb.Status.Members) + assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) // scale members from five to three mdb.Spec.Members = 3 @@ -424,13 +424,14 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin res, err = r.Reconcile(reconcile.Request{NamespacedName: mdb.NamespacedName()}) + makeStatefulSetReady(t, mgr.GetClient(), mdb) assert.NoError(t, err) err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.Members) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -441,7 +442,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { @@ -454,7 +455,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) // scale members from three to five mdb.Spec.Members = 5 @@ -472,7 +473,9 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. 
assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.Members) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) + + makeStatefulSetReady(t, mgr.GetClient(), mdb) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -484,7 +487,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. assert.NoError(t, err) assert.Equal(t, false, res.Requeue) - assert.Equal(t, 5, mdb.Status.Members) + assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) } func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) { @@ -521,7 +524,7 @@ func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestOpenshift_Configuration(t *testing.T) { @@ -632,11 +635,21 @@ func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err e // makeStatefulSetReady updates the StatefulSet corresponding to the // provided MongoDB resource to mark it as ready for the case of `statefulset.IsReady` func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { + setStatefulSetReadyReplicas(t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) +} + +// makeStatefulSetUnReady updates the StatefulSet corresponding to the +// provided MongoDB resource to mark it as unready. +func makeStatefulSetUnReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { + setStatefulSetReadyReplicas(t, c, mdb, 0) +} + +func setStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB, readyReplicas int) { sts := appsv1.StatefulSet{} err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) assert.NoError(t, err) - sts.Status.ReadyReplicas = int32(mdb.ReplicasThisReconciliation()) - sts.Status.UpdatedReplicas = int32(mdb.ReplicasThisReconciliation()) + sts.Status.ReadyReplicas = int32(readyReplicas) + sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) err = c.Update(context.TODO(), &sts) assert.NoError(t, err) } diff --git a/pkg/controller/mongodb/replicaset_scaledown.go b/pkg/controller/mongodb/replicaset_scaledown.go new file mode 100644 index 000000000..ebcfab1b3 --- /dev/null +++ b/pkg/controller/mongodb/replicaset_scaledown.go @@ -0,0 +1,58 @@ +package mongodb + +import ( + "fmt" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// updateScalingStatus updates the status fields which are required to keep track of the current +// scaling state of the resource +func updateScalingStatus(statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) error { + _, err := status.Update(statusWriter, &mdb, + statusOptions(). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()), + ) + return err +} + +// checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig ensures that the expected number of StatefulSet +// replicas are ready. When a member has its process removed from the Automation Config, the pod will eventually +// become unready. 
We use this information to determine if we are safe to continue the reconciliation process. +func checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(stsGetter statefulset.Getter, statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) (reconcile.Result, error) { + isAtDesiredReplicaCount, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(stsGetter, mdb) + if err != nil { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Error, fmt.Sprintf("Error determining state of StatefulSet: %s", err)). + withFailedPhase(), + ) + } + + if !isAtDesiredReplicaCount { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Info, fmt.Sprintf("Not yet at the desired number of replicas, currentMembers=%d, desiredMembers=%d", + mdb.CurrentReplicas(), mdb.DesiredReplicas())). + withPendingPhase(10), + ) + } + return result.OK() +} + +// hasReachedDesiredNumberOfStatefulSetReplicasReady checks to see if the StatefulSet corresponding +// to the given MongoDB resource has the expected number of ready replicas. +func hasReachedDesiredNumberOfStatefulSetReplicasReady(stsGetter statefulset.Getter, mdb mdbv1.MongoDB) (bool, error) { + sts, err := stsGetter.GetStatefulSet(mdb.NamespacedName()) + if err != nil { + return false, err + } + desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation()) + return sts.Status.ReadyReplicas == desiredReadyReplicas, nil +} diff --git a/pkg/controller/mongodb/replicaset_scaledown_test.go b/pkg/controller/mongodb/replicaset_scaledown_test.go new file mode 100644 index 000000000..bfd066c54 --- /dev/null +++ b/pkg/controller/mongodb/replicaset_scaledown_test.go @@ -0,0 +1,93 @@ +package mongodb + +import ( + "context" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestUpdateScalingStatus(t *testing.T) { + mdb := newTestReplicaSet() + mgr := client.NewManager(&mdb) + + assert.Equal(t, 0, mdb.Status.CurrentStatefulSetReplicas) + assert.Equal(t, 0, mdb.Status.CurrentMongoDBMembers) + + expectedAutomationConfigMembers := mdb.AutomationConfigMembersThisReconciliation() + expectedStatefulSetReplicas := mdb.StatefulSetReplicasThisReconciliation() + + err := updateScalingStatus(mgr.Client, mdb) + assert.NoError(t, err) + + err = mgr.Client.Get(context.TODO(), mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.Equal(t, expectedAutomationConfigMembers, mdb.Status.CurrentMongoDBMembers) + assert.Equal(t, expectedStatefulSetReplicas, mdb.Status.CurrentStatefulSetReplicas) +} + +func TestHasReachedDesiredNumberOfStatefulSetReplicasReady(t *testing.T) { + createStatefulSet := func(c k8sClient.Client, mdb mdbv1.MongoDB) error { + replicas := int32(mdb.Spec.Members) + return c.Create(context.TODO(), &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: mdb.Name, + Namespace: mdb.Namespace, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + }) + } + + t.Run("There is an error when the StatefulSet does not exist", func(t *testing.T) { + // Arrange + mdb := newTestReplicaSet() + mgr := client.NewManager(&mdb) + + // Act + _, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb) + + // Assert + assert.Error(t, err) + }) + + t.Run("Returns true when the StatefulSet exists and is 
ready", func(t *testing.T) { + // Arrange + mdb := newTestReplicaSet() + mgr := client.NewManager(&mdb) + err := createStatefulSet(mgr.Client, mdb) + assert.NoError(t, err) + makeStatefulSetReady(t, mgr.Client, mdb) + + // Act + hasReached, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb) + + // Assert + assert.NoError(t, err, "should be no error when the StatefulSet exists") + assert.True(t, hasReached, "Should not be ready when the stateful set is not ready") + }) + + t.Run("Returns false when the StatefulSet exists and is not ready", func(t *testing.T) { + // Arrange + mdb := newTestReplicaSet() + mgr := client.NewManager(&mdb) + err := createStatefulSet(mgr.Client, mdb) + assert.NoError(t, err) + makeStatefulSetUnReady(t, mgr.Client, mdb) + + // Act + hasReached, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb) + + // Assert + assert.NoError(t, err, "should be no error when the StatefulSet exists") + assert.False(t, hasReached, "Should not be ready when the stateful set is not ready") + }) + +} diff --git a/pkg/util/result/reconciliationresults.go b/pkg/util/result/reconciliationresults.go new file mode 100644 index 000000000..2bfbc8b71 --- /dev/null +++ b/pkg/util/result/reconciliationresults.go @@ -0,0 +1,23 @@ +package result + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func ShouldRequeue(result reconcile.Result, err error) bool { + return err != nil || result.Requeue || result.RequeueAfter > 0 +} + +func OK() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +func Retry(after int) (reconcile.Result, error) { + return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil +} + +func Failed() (reconcile.Result, error) { + return Retry(0) +} diff --git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go index 40a957a6b..60c7f9adb 100644 --- a/pkg/util/scale/scale.go +++ b/pkg/util/scale/scale.go @@ -16,7 +16,7 @@ func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int { return replicaSetScaler.DesiredReplicas() } - if isScalingDown(replicaSetScaler) { + if IsScalingDown(replicaSetScaler) { return replicaSetScaler.CurrentReplicas() - 1 } @@ -28,7 +28,7 @@ func IsStillScaling(replicaSetScaler ReplicaSetScaler) bool { return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicas() } -func isScalingDown(replicaSetScaler ReplicaSetScaler) bool { +func IsScalingDown(replicaSetScaler ReplicaSetScaler) bool { return replicaSetScaler.DesiredReplicas() < replicaSetScaler.CurrentReplicas() } diff --git a/test/e2e/e2eutil.go b/test/e2e/e2eutil.go index ec8a55890..6325aaf54 100644 --- a/test/e2e/e2eutil.go +++ b/test/e2e/e2eutil.go @@ -91,6 +91,14 @@ func WaitForStatefulSetToBeReady(t *testing.T, mdb *mdbv1.MongoDB, retryInterval }) } +// WaitForStatefulSetToBeReadyAfterScaleDown waits for just the ready replicas to be correct +// and does not account for the updated replicas +func WaitForStatefulSetToBeReadyAfterScaleDown(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration) error { + return waitForStatefulSetCondition(t, mdb, retryInterval, timeout, func(sts appsv1.StatefulSet) bool { + return int32(mdb.Spec.Members) == sts.Status.ReadyReplicas + }) +} + func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration, condition func(set appsv1.StatefulSet) bool) error { _, err := WaitForStatefulSetToExist(mdb.Name, retryInterval, timeout) if err != nil { @@ -103,7 
+111,8 @@ func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDB, retryInterval if err != nil { return false, err } - t.Logf("Waiting for %s to have %d replicas. Current ready replicas: %d\n", mdb.Name, mdb.Spec.Members, sts.Status.ReadyReplicas) + t.Logf("Waiting for %s to have %d replicas. Current ready replicas: %d, Current updated replicas: %d\n", + mdb.Name, mdb.Spec.Members, sts.Status.ReadyReplicas, sts.Status.UpdatedReplicas) ready := condition(sts) return ready, nil }) diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index fa49233a4..0adc701dc 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -29,8 +29,27 @@ import ( // StatefulSetIsReady ensures that the underlying stateful set // reaches the running state func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { + return statefulSetIsReady(mdb, time.Second*15, time.Minute*5) +} + +// StatefulSetIsReadyAfterScaleDown ensures that a replica set is scaled down correctly +// note: scaling down takes considerably longer than scaling up due to the readiness probe +// failure threshold being high +func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDB) func(t *testing.T) { + return func(t *testing.T) { + err := e2eutil.WaitForStatefulSetToBeReadyAfterScaleDown(t, mdb, time.Second*60, time.Minute*45) + if err != nil { + t.Fatal(err) + } + t.Logf("StatefulSet %s/%s is ready!", mdb.Namespace, mdb.Name) + } +} + +// statefulSetIsReady ensures that the underlying stateful set +// reaches the running state +func statefulSetIsReady(mdb *mdbv1.MongoDB, interval time.Duration, timeout time.Duration) func(t *testing.T) { return func(t *testing.T) { - err := e2eutil.WaitForStatefulSetToBeReady(t, mdb, time.Second*15, time.Minute*5) + err := e2eutil.WaitForStatefulSetToBeReady(t, mdb, interval, timeout) if err != nil { t.Fatal(err) } @@ -128,9 +147,10 @@ func BasicFunctionality(mdb *mdbv1.MongoDB) func(*testing.T) { }))) t.Run("Test Status Was Updated", Status(mdb, mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - Members: mdb.Spec.Members, + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentMongoDBMembers: mdb.Spec.Members, + CurrentStatefulSetReplicas: mdb.Spec.Members, })) } } diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index b9a61175b..994ffc966 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -17,9 +17,9 @@ func TestMain(m *testing.M) { f.MainEntry(m) } -// TestReplicaSet creates two MongoDB resources that are handled by the Operator at the +// TestReplicaSetMultiple creates two MongoDB resources that are handled by the Operator at the // same time.
One of them is scaled to 5 and then back to 3 -func TestReplicaSet(t *testing.T) { +func TestReplicaSetMultiple(t *testing.T) { ctx, shouldCleanup := setup.InitTest(t) @@ -69,24 +69,28 @@ func TestReplicaSet(t *testing.T) { t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb0, 5)) t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetIsReady(&mdb0)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 2)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, - mdbv1.MongoDBStatus{ - MongoURI: mdb0.MongoURI(), - Phase: mdbv1.Running, - Members: 5, - })) - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb0)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, mdbv1.MongoDBStatus{ - MongoURI: mdb0.MongoURI(), - Phase: mdbv1.Running, - Members: 3, + MongoURI: mdb0.MongoURI(), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, })) + // TODO: Currently the scale down process takes too long to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb0)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb0.MongoURI(), + // Phase: mdbv1.Running, + // CurrentMongoDBMembers: 5, + // CurrentStatefulSetReplicas: 5, + // })) + }) // One last check that mdb1 was not altered. 
diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go index 5174e7184..62e44a687 100644 --- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go +++ b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go @@ -52,9 +52,10 @@ func TestReplicaSetReadinessProbeScaling(t *testing.T) { t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - Members: 3, + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 3, + CurrentStatefulSetReplicas: 3, })) }) diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index 514f5c14c..21dbff9be 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -1,4 +1,4 @@ -package replica_set_readiness_probe +package replica_set_scale_up import ( "testing" @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { f.MainEntry(m) } -func TestReplicaSetScale(t *testing.T) { +func TestReplicaSetScaleUp(t *testing.T) { ctx, shouldCleanup := setup.InitTest(t) @@ -46,22 +46,26 @@ func TestReplicaSetScale(t *testing.T) { t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb, 5)) t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetIsReady(&mdb)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 2)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - Members: 5, - })) - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - Members: 3, + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, })) + + // TODO: Currently the scale down process takes too long to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb.MongoURI(), + // Phase: mdbv1.Running, + // CurrentMongoDBMembers: 3, + // CurrentStatefulSetReplicas: 3, + // })) }) } diff --git a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go new file mode 100644 index 000000000..0580aedc1 --- /dev/null +++ b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go @@ -0,0 
+1,60 @@ +package replica_set_scale_down + +import ( + "testing" + "time" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + f "github.com/operator-framework/operator-sdk/pkg/test" +) + +func TestMain(m *testing.M) { + f.MainEntry(m) +} + +func TestReplicaSetScaleDown(t *testing.T) { + ctx, shouldCleanup := setup.InitTest(t) + + if shouldCleanup { + defer ctx.Cleanup() + } + mdb, user := e2eutil.NewTestMongoDB("replica-set-scale-down") + + _, err := setup.GeneratePasswordForUser(user, ctx) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + + t.Run("MongoDB is reachable", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 1)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, + mdbv1.MongoDBStatus{ + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 1, + CurrentStatefulSetReplicas: 1, + })) + }) +}