From 276c030b60b4ab4fd7d8852806a1dbb7dedcf7a7 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 7 Sep 2020 11:12:14 +0100 Subject: [PATCH 01/39] added scaling fix for MongoDB 4.4 --- pkg/apis/mongodb/v1/mongodb_types.go | 16 +++++++ .../mongodb/replica_set_controller.go | 10 +++++ pkg/util/scale/scale.go | 42 +++++++++++++++++++ .../replica_set_multiple_test.go | 2 + .../replica_set_readiness_probe_test.go | 1 + .../replica_set_scaling_test.go | 2 + 6 files changed, 73 insertions(+) create mode 100644 pkg/util/scale/scale.go diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index d44ca3293..9c13d7e20 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -5,6 +5,8 @@ import ( "fmt" "strings" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/runtime" @@ -197,6 +199,7 @@ type AuthMode string type MongoDBStatus struct { MongoURI string `json:"mongoUri"` Phase Phase `json:"phase"` + Members int `json:"members"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -214,9 +217,22 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } +func (m MongoDB) DesiredReplicaSetMembers() int { + return m.Spec.Members +} + +func (m MongoDB) CurrentReplicaSetMembers() int { + return m.Status.Members +} + +func (m MongoDB) MembersThisReconciliation() int { + return scale.ReplicasThisReconciliation(m) +} + func (m *MongoDB) UpdateSuccess() { m.Status.MongoURI = m.MongoURI() m.Status.Phase = Running + m.Status.Members = scale.ReplicasThisReconciliation(m) } // MongoURI returns a mongo uri which can be used to connect to this deployment diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 64b84ccb9..735963ed6 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -9,6 +9,8 @@ import ( "os" "time" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "github.com/pkg/errors" @@ -249,6 +251,14 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R return reconcile.Result{}, err } + if scale.IsStillScaling(mdb) { + r.log.Infow("Continuing scaling operation", "MongoDB", mdb.NamespacedName(), + "currentMembers", mdb.CurrentReplicaSetMembers(), + "desiredMembers", mdb.DesiredReplicaSetMembers(), + ) + return reconcile.Result{Requeue: true}, nil + } + r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", newStatus) return reconcile.Result{}, nil } diff --git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go new file mode 100644 index 000000000..32fb4f460 --- /dev/null +++ b/pkg/util/scale/scale.go @@ -0,0 +1,42 @@ +package scale + +// ReplicaSetScaler is an interface which is able to scale up and down a replicaset +// a single member at a time +type ReplicaSetScaler interface { + DesiredReplicaSetMembers() int + CurrentReplicaSetMembers() int +} + +// ReplicasThisReconciliation returns the number of replicas that should be configured +// for that reconciliation. As of MongoDB 4.4 we can only scale members up / down 1 at a time. 
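To make the one-member-at-a-time rule concrete, here is a minimal sketch of how the helper below behaves; the fixedScaler type is invented purely for this illustration and is not part of the patch:

    // fixedScaler is a hypothetical ReplicaSetScaler used only for this example.
    type fixedScaler struct{ current, desired int }

    func (f fixedScaler) DesiredReplicaSetMembers() int { return f.desired }
    func (f fixedScaler) CurrentReplicaSetMembers() int { return f.current }

    // ReplicasThisReconciliation(fixedScaler{current: 0, desired: 3}) == 3  (new deployment, jump straight to desired)
    // ReplicasThisReconciliation(fixedScaler{current: 3, desired: 5}) == 4  (scaling up, one member per pass)
    // ReplicasThisReconciliation(fixedScaler{current: 5, desired: 3}) == 4  (scaling down, one member per pass)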
+func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int {
+	// the current replica set members will be 0 when we are creating a new deployment
+	// if this is the case, we want to jump straight to the desired members and not make changes incrementally
+	if replicaSetScaler.CurrentReplicaSetMembers() == 0 || replicaSetScaler.CurrentReplicaSetMembers() == replicaSetScaler.DesiredReplicaSetMembers() {
+		return replicaSetScaler.DesiredReplicaSetMembers()
+	}
+
+	if isScalingDown(replicaSetScaler) {
+		return replicaSetScaler.CurrentReplicaSetMembers() - 1
+	}
+
+	return replicaSetScaler.CurrentReplicaSetMembers() + 1
+}
+
+func IsStillScaling(replicaSetScaler ReplicaSetScaler) bool {
+	return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicaSetMembers()
+}
+
+func isScalingDown(replicaSetScaler ReplicaSetScaler) bool {
+	return replicaSetScaler.DesiredReplicaSetMembers() < replicaSetScaler.CurrentReplicaSetMembers()
+}
+
+// AnyAreStillScaling reports true if any one of the provided members is still scaling
+func AnyAreStillScaling(scalers ...ReplicaSetScaler) bool {
+	for _, s := range scalers {
+		if IsStillScaling(s) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
index 7a106bb14..4e5657042 100644
--- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go
+++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
@@ -71,6 +71,7 @@ func TestReplicaSet(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb0.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3))
 	t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb0))
@@ -80,6 +81,7 @@ func TestReplicaSet(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb0.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
index eb8e02aec..6af18650f 100644
--- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
+++ b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
@@ -52,6 +52,7 @@ func TestReplicaSetReadinessProbeScaling(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go
index 0098b49a7..16ca5a253 100644
--- a/test/e2e/replica_set_scale/replica_set_scaling_test.go
+++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go
@@ -51,6 +51,7 @@ func TestReplicaSetScale(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3))
 	t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb))
@@ -60,6 +61,7 @@ func TestReplicaSetScale(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
 }

From b9bdad0e72137d64b3600289983b6c3b271606e5 Mon Sep 17 00:00:00 2001
From: chatton
Date: Mon, 7 Sep 2020 11:48:09 +0100
Subject: [PATCH 02/39] wip

---
 pkg/apis/mongodb/v1/mongodb_types.go          |  6 +--
 .../mongodb/replica_set_controller.go         | 43 +++++++++++-------
 .../mongodb/replicaset_controller_test.go     | 44 +++++++++++++++++--
 pkg/util/scale/scale.go                       |  4
++ 4 files changed, 74 insertions(+), 23 deletions(-) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index 9c13d7e20..a7eba561c 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -217,15 +217,15 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } -func (m MongoDB) DesiredReplicaSetMembers() int { +func (m *MongoDB) DesiredReplicaSetMembers() int { return m.Spec.Members } -func (m MongoDB) CurrentReplicaSetMembers() int { +func (m *MongoDB) CurrentReplicaSetMembers() int { return m.Status.Members } -func (m MongoDB) MembersThisReconciliation() int { +func (m *MongoDB) MembersThisReconciliation() int { return scale.ReplicasThisReconciliation(m) } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 735963ed6..325c3b007 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -173,7 +173,10 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R } r.log = zap.S().With("ReplicaSet", request.NamespacedName) - r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status) + r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status, + "desiredMembers", mdb.DesiredReplicaSetMembers(), + "currentMembers", mdb.CurrentReplicaSetMembers(), + ) if err := r.ensureAutomationConfig(mdb); err != nil { r.log.Warnf("error creating automation config config map: %s", err) @@ -244,22 +247,28 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R return reconcile.Result{}, err } - r.log.Debug("Updating MongoDB Status") - newStatus, err := r.updateAndReturnStatusSuccess(&mdb) - if err != nil { - r.log.Warnf("Error updating the status of the MongoDB resource: %s", err) + newMdb := mdbv1.MongoDB{} + if err := r.client.Get(context.TODO(), mdb.NamespacedName(), &newMdb); err != nil { + r.log.Errorf("Could not get get resource: %s", err) return reconcile.Result{}, err } - if scale.IsStillScaling(mdb) { - r.log.Infow("Continuing scaling operation", "MongoDB", mdb.NamespacedName(), - "currentMembers", mdb.CurrentReplicaSetMembers(), - "desiredMembers", mdb.DesiredReplicaSetMembers(), + if scale.IsStillScaling(&newMdb) { + r.log.Infow("Continuing scaling operation", "MongoDB", newMdb.NamespacedName(), + "currentMembers", newMdb.CurrentReplicaSetMembers(), + "desiredMembers", newMdb.DesiredReplicaSetMembers(), ) return reconcile.Result{Requeue: true}, nil } - r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", newStatus) + r.log.Debug("Updating MongoDB Status") + newStatus, err := r.updateAndReturnStatusSuccess(&newMdb) + if err != nil { + r.log.Warnf("Error updating the status of the MongoDB resource: %s", err) + return reconcile.Result{}, err + } + + r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", newMdb.Spec, "MongoDB.Status", newStatus) return reconcile.Result{}, nil } @@ -359,15 +368,15 @@ func (r ReplicaSetReconciler) setAnnotations(nsName types.NamespacedName, annota // the resource's status is updated to reflect to the state, and any other cleanup // operators should be performed here func (r ReplicaSetReconciler) updateAndReturnStatusSuccess(mdb *mdbv1.MongoDB) (mdbv1.MongoDBStatus, error) { - newMdb := &mdbv1.MongoDB{} - if err := r.client.Get(context.TODO(), mdb.NamespacedName(), newMdb); err != 
nil { - return mdbv1.MongoDBStatus{}, errors.Errorf("could not get get resource: %s", err) - } - newMdb.UpdateSuccess() - if err := r.client.Status().Update(context.TODO(), newMdb); err != nil { + //newMdb := &mdbv1.MongoDB{} + //if err := r.client.Get(context.TODO(), mdb.NamespacedName(), newMdb); err != nil { + // return mdbv1.MongoDBStatus{}, errors.Errorf("could not get get resource: %s", err) + //} + mdb.UpdateSuccess() + if err := r.client.Status().Update(context.TODO(), mdb); err != nil { return mdbv1.MongoDBStatus{}, errors.Errorf(fmt.Sprintf("could not update status: %s", err)) } - return newMdb.Status, nil + return mdb.Status, nil } func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error { diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index 0c472508b..02d068d4f 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -390,14 +390,14 @@ func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { } func TestScramIsConfigured(t *testing.T) { - AssertReplicaSetIsConfiguredWithScram(t, newScramReplicaSet()) + assertReplicaSetIsConfiguredWithScram(t, newScramReplicaSet()) } func TestScramIsConfiguredWhenNotSpecified(t *testing.T) { - AssertReplicaSetIsConfiguredWithScram(t, newTestReplicaSet()) + assertReplicaSetIsConfiguredWithScram(t, newTestReplicaSet()) } -func AssertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) { +func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) { mgr := client.NewManager(&mdb) r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) @@ -420,6 +420,44 @@ func AssertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) { }) } +func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { + mdb := newTestReplicaSet() + + mgr := client.NewManager(&mdb) + r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) + res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.Equal(t, 3, mdb.Status.Members) +} + +func TestReplicaSet_IsScaled_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + mdb := newTestReplicaSet() + + mgr := client.NewManager(&mdb) + r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) + res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + assert.Equal(t, 3, mdb.Status.Members) + + // scale members from 3 to five + mdb.Spec.Members = 5 + + err = mgr.GetClient().Update(context.TODO(), &mdb) + makeStatefulSetReady(t, mgr.GetClient(), mdb) + + res, err = r.Reconcile(reconcile.Request{NamespacedName: mdb.NamespacedName()}) + + assert.NoError(t, err) + assert.Equal(t, true, res.Requeue) + assert.Equal(t, 4, mdb.Status.Members) +} + func TestOpenshift_Configuration(t *testing.T) { sts := performReconciliationAndGetStatefulSet(t, "openshift_mdb.yaml") assert.Equal(t, "MANAGED_SECURITY_CONTEXT", sts.Spec.Template.Spec.Containers[0].Env[1].Name) diff 
--git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go index 32fb4f460..ff2925130 100644 --- a/pkg/util/scale/scale.go +++ b/pkg/util/scale/scale.go @@ -1,5 +1,7 @@ package scale +import "fmt" + // ReplicaSetScaler is an interface which is able to scale up and down a replicaset // a single member at a time type ReplicaSetScaler interface { @@ -10,6 +12,8 @@ type ReplicaSetScaler interface { // ReplicasThisReconciliation returns the number of replicas that should be configured // for that reconciliation. As of MongoDB 4.4 we can only scale members up / down 1 at a time. func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int { + fmt.Println(replicaSetScaler.DesiredReplicaSetMembers()) + fmt.Println(replicaSetScaler.CurrentReplicaSetMembers()) // the current replica set members will be 0 when we are creating a new deployment // if this is the case, we want to jump straight to the desired members and not make changes incrementally if replicaSetScaler.CurrentReplicaSetMembers() == 0 || replicaSetScaler.CurrentReplicaSetMembers() == replicaSetScaler.DesiredReplicaSetMembers() { From 595b45e1a9b4ef1f3bcc22789fb4ca08f03e50f8 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 7 Sep 2020 16:13:36 +0100 Subject: [PATCH 03/39] Added new statuses and unit tests --- pkg/apis/mongodb/v1/mongodb_status_options.go | 186 ++++++++++++++++++ .../mongodb/v1/mongodb_status_options_test.go | 82 ++++++++ pkg/apis/mongodb/v1/mongodb_types.go | 3 + .../mongodb/replica_set_controller.go | 97 +++++---- pkg/util/status/status.go | 45 +++++ pkg/util/status/status_test.go | 86 ++++++++ 6 files changed, 461 insertions(+), 38 deletions(-) create mode 100644 pkg/apis/mongodb/v1/mongodb_status_options.go create mode 100644 pkg/apis/mongodb/v1/mongodb_status_options_test.go create mode 100644 pkg/util/status/status.go create mode 100644 pkg/util/status/status_test.go diff --git a/pkg/apis/mongodb/v1/mongodb_status_options.go b/pkg/apis/mongodb/v1/mongodb_status_options.go new file mode 100644 index 000000000..13e05faac --- /dev/null +++ b/pkg/apis/mongodb/v1/mongodb_status_options.go @@ -0,0 +1,186 @@ +package v1 + +import ( + "fmt" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + "github.com/pkg/errors" + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// OptionBuilder is in charge of constructing a slice of options that +// will be applied on top of the MongoDB resource that has been provided +type OptionBuilder struct { + mdb *MongoDB + options []status.Option +} + +// NewOptionBuilder returns an initialized OptionBuilder +func NewOptionBuilder(mdb *MongoDB) *OptionBuilder { + return &OptionBuilder{ + mdb: mdb, + options: []status.Option{}, + } +} + +// toStatusOptions should be called on terminal operations +// these operations will return the final set of options that will +// be applied the status of the resource +func (o *OptionBuilder) toStatusOptions() status.Option { + return multiOption{ + options: o.options, + } +} + +type multiOption struct { + options []status.Option +} + +func (m multiOption) ApplyOption() { + for _, opt := range m.options { + opt.ApplyOption() + } +} + +func (m multiOption) GetResult() (reconcile.Result, error) { + return status.DetermineReconciliationResult(m.options) +} + +func (o *OptionBuilder) Success() status.Option { + return o.WithMembers(o.mdb.Spec.Members). + WithMongoURI(o.mdb.MongoURI()). 
+ RunningPhase() +} + +func (o *OptionBuilder) Failed(msg string) status.Option { + return o.withPhase(Failed, msg).toStatusOptions() +} + +func (o *OptionBuilder) Failedf(msg string, params ...interface{}) status.Option { + return o.Failed(fmt.Sprintf(msg, params...)) +} + +type retryOption struct { + retryAfter int +} + +func (r retryOption) ApplyOption() { + // has no impact on the resource status itself +} + +func (r retryOption) GetResult() (reconcile.Result, error) { + return retry(r.retryAfter) +} + +func (o *OptionBuilder) RetryAfter(seconds int) *OptionBuilder { + o.options = append(o.options, + retryOption{ + retryAfter: seconds, + }) + return o +} + +func (o *OptionBuilder) WithMongoURI(uri string) *OptionBuilder { + o.options = append(o.options, + mongoUriOption{ + mdb: o.mdb, + mongoUri: uri, + }) + return o +} + +type mongoUriOption struct { + mongoUri string + mdb *MongoDB +} + +func (m mongoUriOption) ApplyOption() { + m.mdb.Status.MongoURI = m.mongoUri +} + +func (m mongoUriOption) GetResult() (reconcile.Result, error) { + return ok() +} + +func (o *OptionBuilder) WithMembers(members int) *OptionBuilder { + o.options = append(o.options, + membersOption{ + mdb: o.mdb, + members: members, + }) + return o +} + +type membersOption struct { + members int + mdb *MongoDB +} + +func (m membersOption) ApplyOption() { + m.mdb.Status.Members = m.members +} + +func (m membersOption) GetResult() (reconcile.Result, error) { + return ok() +} + +func (o *OptionBuilder) RunningPhase() status.Option { + return o.withPhase(Running, "").toStatusOptions() +} + +func (o *OptionBuilder) withPhase(phase Phase, msg string) *OptionBuilder { + o.options = append(o.options, + phaseOption{ + mdb: o.mdb, + phase: phase, + message: msg, + }) + return o +} + +func (o *OptionBuilder) PendingPhase(msg string) status.Option { + return o.withPhase(Pending, msg).toStatusOptions() +} + +func (o *OptionBuilder) PendingPhasef(msg string, params ...interface{}) status.Option { + return o.withPhase(Pending, fmt.Sprintf(msg, params...)).toStatusOptions() +} + +type phaseOption struct { + phase Phase + message string + mdb *MongoDB +} + +func (p phaseOption) ApplyOption() { + p.mdb.Status.Phase = p.phase +} + +func (p phaseOption) GetResult() (reconcile.Result, error) { + if p.phase == Running { + return ok() + } + if p.phase == Pending { + return retry(10) + } + if p.phase == Failed { + // TODO: don't access global logger here + zap.S().Errorf(p.message) + return failed(p.message) + } + return ok() +} + +func ok() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +func retry(after int) (reconcile.Result, error) { + return reconcile.Result{RequeueAfter: time.Second * time.Duration(after)}, nil +} + +func failed(msg string, params ...interface{}) (reconcile.Result, error) { + return reconcile.Result{}, errors.Errorf(msg, params...) 
+} diff --git a/pkg/apis/mongodb/v1/mongodb_status_options_test.go b/pkg/apis/mongodb/v1/mongodb_status_options_test.go new file mode 100644 index 000000000..2ac6f5b63 --- /dev/null +++ b/pkg/apis/mongodb/v1/mongodb_status_options_test.go @@ -0,0 +1,82 @@ +package v1 + +import ( + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + "github.com/stretchr/testify/assert" +) + +func TestMongoUriOption_ApplyOption(t *testing.T) { + + mdb := newReplicaSet(3, "my-rs", "my-ns") + + opt := mongoUriOption{ + mdb: &mdb, + mongoUri: "my-uri", + } + + opt.ApplyOption() + + assert.Equal(t, "my-uri", mdb.Status.MongoURI, "Status should be updated") +} + +func TestMembersOption_ApplyOption(t *testing.T) { + mdb := newReplicaSet(3, "my-rs", "my-ns") + + opt := membersOption{ + mdb: &mdb, + members: 5, + } + + opt.ApplyOption() + + assert.Equal(t, 3, mdb.Spec.Members, "Spec should remain unchanged") + assert.Equal(t, 5, mdb.Status.Members, "Status should be updated") +} + +func TestMultiOption_ApplyOption(t *testing.T) { + mdb := newReplicaSet(3, "my-rs", "my-ns") + + opt := multiOption{ + options: []status.Option{ + membersOption{ + mdb: &mdb, + members: 4, + }, + mongoUriOption{ + mdb: &mdb, + mongoUri: "my-uri", + }, + }, + } + + opt.ApplyOption() + + assert.Equal(t, 4, mdb.Status.Members) + assert.Equal(t, "my-uri", mdb.Status.MongoURI) +} + +func TestOptionBuilder_RunningPhase(t *testing.T) { + mdb := newReplicaSet(3, "my-rs", "my-ns") + + NewOptionBuilder(&mdb).RunningPhase().ApplyOption() + + assert.Equal(t, Running, mdb.Status.Phase) +} + +func TestOptionBuilder_PendingPhase(t *testing.T) { + mdb := newReplicaSet(3, "my-rs", "my-ns") + + NewOptionBuilder(&mdb).PendingPhase("pending").ApplyOption() + + assert.Equal(t, Pending, mdb.Status.Phase) +} + +func TestOptionBuilder_FailedPhase(t *testing.T) { + mdb := newReplicaSet(3, "my-rs", "my-ns") + + NewOptionBuilder(&mdb).Failed("failed").ApplyOption() + + assert.Equal(t, Failed, mdb.Status.Phase) +} diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index d44ca3293..f27fb24cc 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -24,6 +24,8 @@ type Phase string const ( Running Phase = "Running" + Failed Phase = "Failed" + Pending Phase = "Pending" ) const ( @@ -197,6 +199,7 @@ type AuthMode string type MongoDBStatus struct { MongoURI string `json:"mongoUri"` Phase Phase `json:"phase"` + Members int `json:"members"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 64b84ccb9..da0711770 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -7,9 +7,9 @@ import ( "fmt" "io/ioutil" "os" - "time" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" "github.com/pkg/errors" @@ -174,56 +174,73 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status) if err := r.ensureAutomationConfig(mdb); err != nil { - r.log.Warnf("error creating automation config config map: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb). 
+ Failedf("error creating automation config config map: %s", err), + ) } r.log.Debug("Ensuring the service exists") if err := r.ensureService(mdb); err != nil { - r.log.Warnf("Error ensuring the service exists: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb). + Failedf("Error ensuring the service exists: %s", err), + ) } isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { - r.log.Warnf("Error validating TLS config: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb). + Failedf("Error validating TLS config: %s", err), + ) } if !isTLSValid { - r.log.Infof("TLS config is not yet valid, retrying in 10 seconds") - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb). + RetryAfter(10). + PendingPhase("TLS config is not yet valid, retrying in 10 seconds"), + ) } r.log.Debug("Creating/Updating StatefulSet") if err := r.createOrUpdateStatefulSet(mdb); err != nil { - r.log.Warnf("Error creating/updating StatefulSet: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb). + Failedf("Error creating/updating StatefulSet: %s", err), + ) } currentSts := appsv1.StatefulSet{} if err := r.client.Get(context.TODO(), mdb.NamespacedName(), ¤tSts); err != nil { + errMsg := err.Error() if !apiErrors.IsNotFound(err) { - r.log.Warnf("Error getting StatefulSet: %s", err) + errMsg = fmt.Sprintf("error getting StatefulSet: %s", err) } - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, mdbv1.NewOptionBuilder(&mdb).Failed(errMsg)) + } r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", getUpdateStrategyType(mdb)) ready, err := r.isStatefulSetReady(mdb, ¤tSts) if err != nil { - r.log.Warnf("Error checking StatefulSet status: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb).Failedf("Error checking StatefulSet status: %s", err), + ) } if !ready { - r.log.Infof("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name) - return reconcile.Result{RequeueAfter: time.Second * 10}, nil + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb).RetryAfter(10). 
+ PendingPhasef("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name), + ) } r.log.Debug("Resetting StatefulSet UpdateStrategy") if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { - r.log.Warnf("Error resetting StatefulSet UpdateStrategyType: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb).Failedf("Error resetting StatefulSet UpdateStrategyType: %s", err), + ) } r.log.Debug("Setting MongoDB Annotations") @@ -233,24 +250,19 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R hasLeftReadyStateAnnotationKey: "false", } if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil { - r.log.Warnf("Error setting annotations: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb).Failedf("Error setting annotations: %s", err), + ) } if err := r.completeTLSRollout(mdb); err != nil { - r.log.Warnf("Error completing TLS rollout: %s", err) - return reconcile.Result{}, err + return status.Update(r.client.Status(), &mdb, + mdbv1.NewOptionBuilder(&mdb).Failedf("Error completing TLS rollout: %s", err), + ) } r.log.Debug("Updating MongoDB Status") - newStatus, err := r.updateAndReturnStatusSuccess(&mdb) - if err != nil { - r.log.Warnf("Error updating the status of the MongoDB resource: %s", err) - return reconcile.Result{}, err - } - - r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", newStatus) - return reconcile.Result{}, nil + return r.updateStatus(&mdb) } // resetStatefulSetUpdateStrategy ensures the stateful set is configured back to using RollingUpdateStatefulSetStrategyType @@ -345,19 +357,28 @@ func (r ReplicaSetReconciler) setAnnotations(nsName types.NamespacedName, annota }) } -// updateAndReturnStatusSuccess should be called after a successful reconciliation +// updateStatus should be called after a successful reconciliation // the resource's status is updated to reflect to the state, and any other cleanup // operators should be performed here -func (r ReplicaSetReconciler) updateAndReturnStatusSuccess(mdb *mdbv1.MongoDB) (mdbv1.MongoDBStatus, error) { +func (r ReplicaSetReconciler) updateStatus(mdb *mdbv1.MongoDB) (reconcile.Result, error) { newMdb := &mdbv1.MongoDB{} if err := r.client.Get(context.TODO(), mdb.NamespacedName(), newMdb); err != nil { - return mdbv1.MongoDBStatus{}, errors.Errorf("could not get get resource: %s", err) + return reconcile.Result{}, errors.Errorf("could not get get resource: %s", err) } - newMdb.UpdateSuccess() - if err := r.client.Status().Update(context.TODO(), newMdb); err != nil { - return mdbv1.MongoDBStatus{}, errors.Errorf(fmt.Sprintf("could not update status: %s", err)) + + res, err := status.Update(r.client.Status(), mdb, mdbv1.NewOptionBuilder(mdb).Success()) + if err != nil { + r.log.Warnf("Error updating the status of the MongoDB resource: %s", err) + return reconcile.Result{}, err } - return newMdb.Status, nil + + if res.RequeueAfter > 0 || res.Requeue { + r.log.Infow("Requeuing reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", res) + return res, err + } + + r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", res) + return res, err } func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error { diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go new file mode 100644 index 000000000..cb6dcaadb 
--- /dev/null +++ b/pkg/util/status/status.go @@ -0,0 +1,45 @@ +package status + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type Option interface { + ApplyOption() + GetResult() (reconcile.Result, error) +} + +func Update(statusWriter client.StatusWriter, obj runtime.Object, options ...Option) (reconcile.Result, error) { + for _, opt := range options { + opt.ApplyOption() + } + + if err := statusWriter.Update(context.TODO(), obj); err != nil { + return reconcile.Result{}, err + } + + return DetermineReconciliationResult(options) +} + +func DetermineReconciliationResult(options []Option) (reconcile.Result, error) { + // if there are any errors in any of our options, we return those first + for _, opt := range options { + res, err := opt.GetResult() + if err != nil { + return res, err + } + } + // otherwise we might need to re-queue + for _, opt := range options { + res, _ := opt.GetResult() + if res.Requeue || res.RequeueAfter > 0 { + return res, nil + } + } + // it was a successful reconciliation, nothing to do + return reconcile.Result{}, nil +} diff --git a/pkg/util/status/status_test.go b/pkg/util/status/status_test.go new file mode 100644 index 000000000..a900a5c59 --- /dev/null +++ b/pkg/util/status/status_test.go @@ -0,0 +1,86 @@ +package status + +import ( + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type errorOption struct{} + +func (e errorOption) ApplyOption() {} + +func (e errorOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{}, errors.Errorf("error") +} + +type successOption struct{} + +func (s successOption) ApplyOption() {} + +func (s successOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +type retryOption struct{} + +func (r retryOption) ApplyOption() {} + +func (r retryOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{Requeue: true}, nil +} + +func TestDetermineReconciliationResult(t *testing.T) { + + t.Run("A single error option should result in an error return", func(t *testing.T) { + opts := []Option{ + errorOption{}, + successOption{}, + successOption{}, + } + + res, err := DetermineReconciliationResult(opts) + assert.NotNil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("An error option takes precedence over a retry", func(t *testing.T) { + opts := []Option{ + errorOption{}, + retryOption{}, + successOption{}, + } + res, err := DetermineReconciliationResult(opts) + assert.NotNil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("No errors will result in a successful reconciliation", func(t *testing.T) { + opts := []Option{ + successOption{}, + successOption{}, + successOption{}, + } + res, err := DetermineReconciliationResult(opts) + assert.Nil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("A retry will take precedence over success", func(t *testing.T) { + opts := []Option{ + successOption{}, + successOption{}, + retryOption{}, + } + res, err := DetermineReconciliationResult(opts) + assert.Nil(t, err) + assert.Equal(t, true, res.Requeue) + }) + +} From 1173c3196488c21aca22cf6f54ac24da2e06e865 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 7 Sep 2020 16:52:11 +0100 
Subject: [PATCH 04/39] moved into main controller package --- pkg/apis/mongodb/v1/mongodb_types.go | 2 +- .../mongodb}/mongodb_status_options.go | 67 ++++++++++--------- .../mongodb}/mongodb_status_options_test.go | 29 ++++++-- .../mongodb/replica_set_controller.go | 38 +++++------ .../replica_set_tls/replica_set_tls_test.go | 2 +- .../replica_set_tls_rotate_test.go | 2 +- .../replica_set_tls_upgrade_test.go | 2 +- 7 files changed, 79 insertions(+), 63 deletions(-) rename pkg/{apis/mongodb/v1 => controller/mongodb}/mongodb_status_options.go (62%) rename pkg/{apis/mongodb/v1 => controller/mongodb}/mongodb_status_options_test.go (65%) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index f27fb24cc..612658d2b 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -24,7 +24,7 @@ type Phase string const ( Running Phase = "Running" - Failed Phase = "Failed" + Failed Phase = "failed" Pending Phase = "Pending" ) diff --git a/pkg/apis/mongodb/v1/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go similarity index 62% rename from pkg/apis/mongodb/v1/mongodb_status_options.go rename to pkg/controller/mongodb/mongodb_status_options.go index 13e05faac..361f7144f 100644 --- a/pkg/apis/mongodb/v1/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -1,7 +1,8 @@ -package v1 +package mongodb import ( "fmt" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" "time" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -10,16 +11,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -// OptionBuilder is in charge of constructing a slice of options that +// optionBuilder is in charge of constructing a slice of options that // will be applied on top of the MongoDB resource that has been provided -type OptionBuilder struct { - mdb *MongoDB +type optionBuilder struct { + mdb *v1.MongoDB options []status.Option } -// NewOptionBuilder returns an initialized OptionBuilder -func NewOptionBuilder(mdb *MongoDB) *OptionBuilder { - return &OptionBuilder{ +// newOptionBuilder returns an initialized optionBuilder +func newOptionBuilder(mdb *v1.MongoDB) *optionBuilder { + return &optionBuilder{ mdb: mdb, options: []status.Option{}, } @@ -28,7 +29,7 @@ func NewOptionBuilder(mdb *MongoDB) *OptionBuilder { // toStatusOptions should be called on terminal operations // these operations will return the final set of options that will // be applied the status of the resource -func (o *OptionBuilder) toStatusOptions() status.Option { +func (o *optionBuilder) toStatusOptions() status.Option { return multiOption{ options: o.options, } @@ -48,18 +49,18 @@ func (m multiOption) GetResult() (reconcile.Result, error) { return status.DetermineReconciliationResult(m.options) } -func (o *OptionBuilder) Success() status.Option { - return o.WithMembers(o.mdb.Spec.Members). - WithMongoURI(o.mdb.MongoURI()). - RunningPhase() +func (o *optionBuilder) success() status.Option { + return o.withMembers(o.mdb.Spec.Members). + withMongoURI(o.mdb.MongoURI()). 
+ runningPhase() } -func (o *OptionBuilder) Failed(msg string) status.Option { - return o.withPhase(Failed, msg).toStatusOptions() +func (o *optionBuilder) failed(msg string) status.Option { + return o.withPhase(v1.Failed, msg).toStatusOptions() } -func (o *OptionBuilder) Failedf(msg string, params ...interface{}) status.Option { - return o.Failed(fmt.Sprintf(msg, params...)) +func (o *optionBuilder) failedf(msg string, params ...interface{}) status.Option { + return o.failed(fmt.Sprintf(msg, params...)) } type retryOption struct { @@ -74,7 +75,7 @@ func (r retryOption) GetResult() (reconcile.Result, error) { return retry(r.retryAfter) } -func (o *OptionBuilder) RetryAfter(seconds int) *OptionBuilder { +func (o *optionBuilder) retryAfter(seconds int) *optionBuilder { o.options = append(o.options, retryOption{ retryAfter: seconds, @@ -82,7 +83,7 @@ func (o *OptionBuilder) RetryAfter(seconds int) *OptionBuilder { return o } -func (o *OptionBuilder) WithMongoURI(uri string) *OptionBuilder { +func (o *optionBuilder) withMongoURI(uri string) *optionBuilder { o.options = append(o.options, mongoUriOption{ mdb: o.mdb, @@ -93,7 +94,7 @@ func (o *OptionBuilder) WithMongoURI(uri string) *OptionBuilder { type mongoUriOption struct { mongoUri string - mdb *MongoDB + mdb *v1.MongoDB } func (m mongoUriOption) ApplyOption() { @@ -104,7 +105,7 @@ func (m mongoUriOption) GetResult() (reconcile.Result, error) { return ok() } -func (o *OptionBuilder) WithMembers(members int) *OptionBuilder { +func (o *optionBuilder) withMembers(members int) *optionBuilder { o.options = append(o.options, membersOption{ mdb: o.mdb, @@ -115,7 +116,7 @@ func (o *OptionBuilder) WithMembers(members int) *OptionBuilder { type membersOption struct { members int - mdb *MongoDB + mdb *v1.MongoDB } func (m membersOption) ApplyOption() { @@ -126,11 +127,11 @@ func (m membersOption) GetResult() (reconcile.Result, error) { return ok() } -func (o *OptionBuilder) RunningPhase() status.Option { - return o.withPhase(Running, "").toStatusOptions() +func (o *optionBuilder) runningPhase() status.Option { + return o.withPhase(v1.Running, "").toStatusOptions() } -func (o *OptionBuilder) withPhase(phase Phase, msg string) *OptionBuilder { +func (o *optionBuilder) withPhase(phase v1.Phase, msg string) *optionBuilder { o.options = append(o.options, phaseOption{ mdb: o.mdb, @@ -140,18 +141,18 @@ func (o *OptionBuilder) withPhase(phase Phase, msg string) *OptionBuilder { return o } -func (o *OptionBuilder) PendingPhase(msg string) status.Option { - return o.withPhase(Pending, msg).toStatusOptions() +func (o *optionBuilder) pendingPhase(msg string) status.Option { + return o.withPhase(v1.Pending, msg).toStatusOptions() } -func (o *OptionBuilder) PendingPhasef(msg string, params ...interface{}) status.Option { - return o.withPhase(Pending, fmt.Sprintf(msg, params...)).toStatusOptions() +func (o *optionBuilder) pendingPhasef(msg string, params ...interface{}) status.Option { + return o.withPhase(v1.Pending, fmt.Sprintf(msg, params...)).toStatusOptions() } type phaseOption struct { - phase Phase + phase v1.Phase message string - mdb *MongoDB + mdb *v1.MongoDB } func (p phaseOption) ApplyOption() { @@ -159,13 +160,13 @@ func (p phaseOption) ApplyOption() { } func (p phaseOption) GetResult() (reconcile.Result, error) { - if p.phase == Running { + if p.phase == v1.Running { return ok() } - if p.phase == Pending { + if p.phase == v1.Pending { return retry(10) } - if p.phase == Failed { + if p.phase == v1.Failed { // TODO: don't access global logger here 
zap.S().Errorf(p.message) return failed(p.message) diff --git a/pkg/apis/mongodb/v1/mongodb_status_options_test.go b/pkg/controller/mongodb/mongodb_status_options_test.go similarity index 65% rename from pkg/apis/mongodb/v1/mongodb_status_options_test.go rename to pkg/controller/mongodb/mongodb_status_options_test.go index 2ac6f5b63..884343403 100644 --- a/pkg/apis/mongodb/v1/mongodb_status_options_test.go +++ b/pkg/controller/mongodb/mongodb_status_options_test.go @@ -1,6 +1,8 @@ -package v1 +package mongodb import ( + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "testing" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -60,23 +62,36 @@ func TestMultiOption_ApplyOption(t *testing.T) { func TestOptionBuilder_RunningPhase(t *testing.T) { mdb := newReplicaSet(3, "my-rs", "my-ns") - NewOptionBuilder(&mdb).RunningPhase().ApplyOption() + newOptionBuilder(&mdb).runningPhase().ApplyOption() - assert.Equal(t, Running, mdb.Status.Phase) + assert.Equal(t, mdbv1.Running, mdb.Status.Phase) } func TestOptionBuilder_PendingPhase(t *testing.T) { mdb := newReplicaSet(3, "my-rs", "my-ns") - NewOptionBuilder(&mdb).PendingPhase("pending").ApplyOption() + newOptionBuilder(&mdb).pendingPhase("pending").ApplyOption() - assert.Equal(t, Pending, mdb.Status.Phase) + assert.Equal(t, mdbv1.Pending, mdb.Status.Phase) } func TestOptionBuilder_FailedPhase(t *testing.T) { mdb := newReplicaSet(3, "my-rs", "my-ns") - NewOptionBuilder(&mdb).Failed("failed").ApplyOption() + newOptionBuilder(&mdb).failed("failed").ApplyOption() - assert.Equal(t, Failed, mdb.Status.Phase) + assert.Equal(t, mdbv1.Failed, mdb.Status.Phase) +} + +func newReplicaSet(members int, name, namespace string) mdbv1.MongoDB { + return mdbv1.MongoDB{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: mdbv1.MongoDBSpec{ + Members: members, + }, + } } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index da0711770..110f53198 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -175,39 +175,39 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.ensureAutomationConfig(mdb); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb). - Failedf("error creating automation config config map: %s", err), + newOptionBuilder(&mdb). + failedf("error creating automation config config map: %s", err), ) } r.log.Debug("Ensuring the service exists") if err := r.ensureService(mdb); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb). - Failedf("Error ensuring the service exists: %s", err), + newOptionBuilder(&mdb). + failedf("Error ensuring the service exists: %s", err), ) } isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb). - Failedf("Error validating TLS config: %s", err), + newOptionBuilder(&mdb). + failedf("Error validating TLS config: %s", err), ) } if !isTLSValid { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb). - RetryAfter(10). - PendingPhase("TLS config is not yet valid, retrying in 10 seconds"), + newOptionBuilder(&mdb). + retryAfter(10). 
+ pendingPhase("TLS config is not yet valid, retrying in 10 seconds"), ) } r.log.Debug("Creating/Updating StatefulSet") if err := r.createOrUpdateStatefulSet(mdb); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb). - Failedf("Error creating/updating StatefulSet: %s", err), + newOptionBuilder(&mdb). + failedf("Error creating/updating StatefulSet: %s", err), ) } @@ -217,7 +217,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if !apiErrors.IsNotFound(err) { errMsg = fmt.Sprintf("error getting StatefulSet: %s", err) } - return status.Update(r.client.Status(), &mdb, mdbv1.NewOptionBuilder(&mdb).Failed(errMsg)) + return status.Update(r.client.Status(), &mdb, newOptionBuilder(&mdb).failed(errMsg)) } @@ -225,21 +225,21 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ready, err := r.isStatefulSetReady(mdb, ¤tSts) if err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb).Failedf("Error checking StatefulSet status: %s", err), + newOptionBuilder(&mdb).failedf("Error checking StatefulSet status: %s", err), ) } if !ready { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb).RetryAfter(10). - PendingPhasef("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name), + newOptionBuilder(&mdb).retryAfter(10). + pendingPhasef("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name), ) } r.log.Debug("Resetting StatefulSet UpdateStrategy") if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb).Failedf("Error resetting StatefulSet UpdateStrategyType: %s", err), + newOptionBuilder(&mdb).failedf("Error resetting StatefulSet UpdateStrategyType: %s", err), ) } @@ -251,13 +251,13 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R } if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb).Failedf("Error setting annotations: %s", err), + newOptionBuilder(&mdb).failedf("Error setting annotations: %s", err), ) } if err := r.completeTLSRollout(mdb); err != nil { return status.Update(r.client.Status(), &mdb, - mdbv1.NewOptionBuilder(&mdb).Failedf("Error completing TLS rollout: %s", err), + newOptionBuilder(&mdb).failedf("Error completing TLS rollout: %s", err), ) } @@ -366,7 +366,7 @@ func (r ReplicaSetReconciler) updateStatus(mdb *mdbv1.MongoDB) (reconcile.Result return reconcile.Result{}, errors.Errorf("could not get get resource: %s", err) } - res, err := status.Update(r.client.Status(), mdb, mdbv1.NewOptionBuilder(mdb).Success()) + res, err := status.Update(r.client.Status(), mdb, newOptionBuilder(mdb).success()) if err != nil { r.log.Warnf("Error updating the status of the MongoDB resource: %s", err) return reconcile.Result{}, err diff --git a/test/e2e/replica_set_tls/replica_set_tls_test.go b/test/e2e/replica_set_tls/replica_set_tls_test.go index 8cb68d2f1..fff78d722 100644 --- a/test/e2e/replica_set_tls/replica_set_tls_test.go +++ b/test/e2e/replica_set_tls/replica_set_tls_test.go @@ -30,7 +30,7 @@ func TestReplicaSetTLS(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("Failed to set up TLS resources: %s", err) + t.Fatalf("failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) diff --git 
a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go index 805ad09d2..a41a908bd 100644 --- a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go +++ b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go @@ -32,7 +32,7 @@ func TestReplicaSetTLSRotate(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("Failed to set up TLS resources: %s", err) + t.Fatalf("failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) diff --git a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go index 4d8ed0c71..bf47b36ce 100644 --- a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go +++ b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go @@ -31,7 +31,7 @@ func TestReplicaSetTLSUpgrade(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("Failed to set up TLS resources: %s", err) + t.Fatalf("failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) From 3cc7e226db4cfde72b36dd47b4c49981aa8f5ed5 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 9 Sep 2020 11:41:28 +0100 Subject: [PATCH 05/39] Fix 4.4 scaling by scaling one member at a time --- pkg/apis/mongodb/v1/mongodb_types.go | 4 ++-- pkg/controller/mongodb/replica_set_controller.go | 13 ++++++++++++- pkg/util/status/status.go | 3 +++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index 27dda9060..9dc39b46f 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -220,11 +220,11 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } -func (m *MongoDB) DesiredReplicaSetMembers() int { +func (m MongoDB) DesiredReplicaSetMembers() int { return m.Spec.Members } -func (m *MongoDB) CurrentReplicaSetMembers() int { +func (m MongoDB) CurrentReplicaSetMembers() int { return m.Status.Members } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 336d2691a..8311a474e 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -9,6 +9,8 @@ import ( "os" "strings" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -290,9 +292,18 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } + if scale.IsStillScaling(mdb) { + return status.Update(r.client.Status(), &mdb, statusOptions(). + withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", + mdb.CurrentReplicaSetMembers(), mdb.DesiredReplicaSetMembers())). + withMembers(mdb.MembersThisReconciliation()). + withPendingPhase(10), + ) + } + res, err := status.Update(r.client.Status(), &mdb, statusOptions(). withMongoURI(mdb.MongoURI()). - withMembers(mdb.Spec.Members). + withMembers(mdb.MembersThisReconciliation()). withMessage(None, ""). 
withRunningPhase(), ) diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go index e0dc13158..853bf99dd 100644 --- a/pkg/util/status/status.go +++ b/pkg/util/status/status.go @@ -3,6 +3,8 @@ package status import ( "context" + "go.uber.org/zap" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,6 +28,7 @@ func Update(statusWriter client.StatusWriter, mdb *mdbv1.MongoDB, optionBuilder } if err := statusWriter.Update(context.TODO(), mdb); err != nil { + zap.S().Errorf("Error updating resource status: %s", err) return reconcile.Result{}, err } From 50e55e5fb0148b0cc6dd2d1a70ec25d841ef294e Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 9 Sep 2020 11:43:58 +0100 Subject: [PATCH 06/39] reverted unintentional changes --- pkg/util/status/status_test.go | 1 - test/e2e/replica_set_tls/replica_set_tls_test.go | 2 +- test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go | 2 +- .../e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/util/status/status_test.go b/pkg/util/status/status_test.go index 8a838e2fd..b8b4baeb6 100644 --- a/pkg/util/status/status_test.go +++ b/pkg/util/status/status_test.go @@ -45,7 +45,6 @@ func TestDetermineReconciliationResult(t *testing.T) { } res, err := determineReconciliationResult(opts) - assert.NotNil(t, err) assert.Equal(t, false, res.Requeue) assert.Equal(t, time.Duration(0), res.RequeueAfter) diff --git a/test/e2e/replica_set_tls/replica_set_tls_test.go b/test/e2e/replica_set_tls/replica_set_tls_test.go index 2c635ebb1..515bed9f9 100644 --- a/test/e2e/replica_set_tls/replica_set_tls_test.go +++ b/test/e2e/replica_set_tls/replica_set_tls_test.go @@ -30,7 +30,7 @@ func TestReplicaSetTLS(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("failed to set up TLS resources: %s", err) + t.Fatalf("Failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) diff --git a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go index cab6b8cca..e9719f3c9 100644 --- a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go +++ b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go @@ -32,7 +32,7 @@ func TestReplicaSetTLSRotate(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("failed to set up TLS resources: %s", err) + t.Fatalf("Failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) diff --git a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go index f4420ac5e..c0316d6f2 100644 --- a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go +++ b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go @@ -31,7 +31,7 @@ func TestReplicaSetTLSUpgrade(t *testing.T) { } if err := setup.CreateTLSResources(mdb.Namespace, ctx); err != nil { - t.Fatalf("failed to set up TLS resources: %s", err) + t.Fatalf("Failed to set up TLS resources: %s", err) } tester, err := FromResource(t, mdb) From 5a6137d055e3d8989deee888e262da1c05177fcb Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 9 Sep 2020 11:45:55 +0100 Subject: [PATCH 07/39] immediate requeue if still scaling --- pkg/controller/mongodb/replica_set_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 8311a474e..ebb3f9f2c 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -297,7 +297,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicaSetMembers(), mdb.DesiredReplicaSetMembers())). withMembers(mdb.MembersThisReconciliation()). - withPendingPhase(10), + withPendingPhase(0), ) } From a9a606b93449d71a744e75dcfcdc83c7eade1859 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 9 Sep 2020 11:48:00 +0100 Subject: [PATCH 08/39] updated scaling test to use mongodb 4.4 --- test/e2e/replica_set_scale/replica_set_scaling_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index fc768a290..514f5c14c 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -25,7 +25,6 @@ func TestReplicaSetScale(t *testing.T) { } mdb, user := e2eutil.NewTestMongoDB("mdb0") - mdb.Spec.Version = "4.0.6" // TOOD: Scaling will currently not work with MongoDB 4.4 _, err := setup.GeneratePasswordForUser(user, ctx) if err != nil { From 6469e8c855fed5499a6c574d82650b0871d3e184 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 9 Sep 2020 11:49:02 +0100 Subject: [PATCH 09/39] removed duplicate log line --- pkg/util/status/status.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go index 853bf99dd..e0dc13158 100644 --- a/pkg/util/status/status.go +++ b/pkg/util/status/status.go @@ -3,8 +3,6 @@ package status import ( "context" - "go.uber.org/zap" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,7 +26,6 @@ func Update(statusWriter client.StatusWriter, mdb *mdbv1.MongoDB, optionBuilder } if err := statusWriter.Update(context.TODO(), mdb); err != nil { - zap.S().Errorf("Error updating resource status: %s", err) return reconcile.Result{}, err } From c141d785df6640ce18ad31e49194813cbf1474ec Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 10 Sep 2020 17:00:34 +0100 Subject: [PATCH 10/39] wip --- agent/Dockerfile | 4 +- deploy/operator.yaml | 3 +- .../mongodb/replica_set_controller.go | 96 ++++++++++++++++--- pkg/util/scale/scale.go | 4 +- 4 files changed, 88 insertions(+), 19 deletions(-) diff --git a/agent/Dockerfile b/agent/Dockerfile index 98861316e..b4fd39b0b 100644 --- a/agent/Dockerfile +++ b/agent/Dockerfile @@ -19,9 +19,9 @@ RUN mkdir -p agent \ && chmod -R +r /var/lib/automation/config \ && rm agent/mongodb-agent.tar.gz \ && rm -r mongodb-mms-automation-agent-* - RUN mkdir -p /var/lib/mongodb-mms-automation/probes/ \ - && curl --retry 3 https://readinessprobe-test.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ +# && curl --retry 3 https://readinessprobe-test.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ + && curl --retry 3 https://readiness-probe-scale-test.s3.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ && chmod +x /var/lib/mongodb-mms-automation/probes/readinessprobe \ && mkdir -p /var/log/mongodb-mms-automation/ \ && chmod -R +wr 
/var/log/mongodb-mms-automation/ \
diff --git a/deploy/operator.yaml b/deploy/operator.yaml
index 08dd191f9..de97988d7 100644
--- a/deploy/operator.yaml
+++ b/deploy/operator.yaml
@@ -31,7 +31,8 @@ spec:
     - name: OPERATOR_NAME
       value: "mongodb-kubernetes-operator"
     - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage MongoDB deployments
-      value: quay.io/mongodb/mongodb-agent:10.19.0.6562-1
+      value: quay.io/mongodb/mongodb-agent-dev:scaletest
+#      value: quay.io/mongodb/mongodb-agent:10.19.0.6562-1
     - name: VERSION_UPGRADE_HOOK_IMAGE
       value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2
     - name: MONGODB_IMAGE
diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go
index ebb3f9f2c..acc4a9299 100644
--- a/pkg/controller/mongodb/replica_set_controller.go
+++ b/pkg/controller/mongodb/replica_set_controller.go
@@ -62,15 +62,18 @@ const (
 	mongodbImageEnv        = "MONGODB_IMAGE"
 	mongodbRepoUrl         = "MONGODB_REPO_URL"
 	mongodbToolsVersionEnv = "MONGODB_TOOLS_VERSION"
+	headlessAgentEnv       = "HEADLESS_AGENT"
+	podNamespaceEnv        = "POD_NAMESPACE"
+	automationConfigEnv    = "AUTOMATION_CONFIG_MAP"
 
-	AutomationConfigKey            = "automation-config"
+	AutomationConfigKey            = "cluster-config.json"
 	agentName                      = "mongodb-agent"
 	mongodbName                    = "mongod"
 	versionUpgradeHookName         = "mongod-posthook"
 	dataVolumeName                 = "data-volume"
 	versionManifestFilePath        = "/usr/local/version_manifest.json"
 	readinessProbePath             = "/var/lib/mongodb-mms-automation/probes/readinessprobe"
-	clusterFilePath                = "/var/lib/automation/config/automation-config"
+	clusterFilePath                = "/var/lib/automation/config/cluster-config.json"
 	operatorServiceAccountName     = "mongodb-kubernetes-operator"
 
 	agentHealthStatusFilePathValue = "/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json"
@@ -184,7 +187,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R
 
 	if err := r.ensureAutomationConfig(mdb); err != nil {
 		return status.Update(r.client.Status(), &mdb, statusOptions().
-			withMessage(Error, fmt.Sprintf("error creating automation config config map: %s", err)).
+			withMessage(Error, fmt.Sprintf("error creating automation config secret: %s", err)).
 			withFailedPhase(),
 		)
 	}
@@ -198,6 +201,24 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R
 		)
 	}
 
+	// if we're scaling down, we need to wait until the StatefulSet has the expected number of ready replicas before continuing with the reconciliation
+	if scale.IsScalingDown(mdb) {
+		isAtDesiredReplicaCount, err := r.isExpectedNumberOfStatefulSetReplicasReady(mdb)
+		r.log.Infow("Replica Set is scaling", "isAtDesiredReplicaCount", isAtDesiredReplicaCount)
+		if err != nil {
+			r.log.Errorf("error: %s", err)
+			return reconcile.Result{}, err
+		}
+
+		if !isAtDesiredReplicaCount {
+			return status.Update(r.client.Status(), &mdb, statusOptions().
+				withMessage(Info, fmt.Sprintf("Replica Set is scaling down, currentMembers=%d, desiredMembers=%d",
+					mdb.CurrentReplicaSetMembers(), mdb.DesiredReplicaSetMembers())).
+ withPendingPhase(10), + ) + } + } + isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { return status.Update(r.client.Status(), &mdb, @@ -214,6 +235,25 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } + mdbSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + if k8sClient.IgnoreNotFound(err) != nil { + return reconcile.Result{}, err + } + + readyToUpdateSts := true + if mdbSts.Status.Replicas > int32(mdb.MembersThisReconciliation()) { + readyToUpdateSts = mdbSts.Status.ReadyReplicas == int32(mdb.MembersThisReconciliation()) + r.log.Debugw("ReplicaSet Scaling", "readyReplicas", mdbSts.Status.Replicas, "replicasThisReconciliation", mdb.MembersThisReconciliation()) + } + + if !readyToUpdateSts { + return status.Update(r.client.Status(), &mdb, + statusOptions(). + withMessage(Info, fmt.Sprintf("Not ready to update MongoDB resource %s", mdb.NamespacedName())). + withPendingPhase(5), + ) + } + r.log.Debug("Creating/Updating StatefulSet") if err := r.createOrUpdateStatefulSet(mdb); err != nil { return status.Update(r.client.Status(), &mdb, @@ -355,7 +395,7 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := statefulset.IsReady(*existingStatefulSet, mdb.Spec.Members) + isReady := statefulset.IsReady(*existingStatefulSet, mdb.MembersThisReconciliation()) if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ @@ -376,6 +416,17 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta return areEqual && isReady, nil } +func (r *ReplicaSetReconciler) isExpectedNumberOfStatefulSetReplicasReady(mdb mdbv1.MongoDB) (bool, error) { + sts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + if err != nil { + return false, err + } + desiredReadyReplicas := int32(mdb.MembersThisReconciliation()) + r.log.Infow("isExpectedNumberOfStatefulSetReplicasReady", "desiredReadyReplicas", desiredReadyReplicas) + + return sts.Status.ReadyReplicas == desiredReadyReplicas, nil +} + func (r *ReplicaSetReconciler) ensureService(mdb mdbv1.MongoDB) error { svc := buildService(mdb) err := r.client.Create(context.TODO(), &svc) @@ -429,7 +480,7 @@ func buildAutomationConfig(mdb mdbv1.MongoDB, mdbVersionConfig automationconfig. SetTopology(automationconfig.ReplicaSetTopology). SetName(mdb.Name). SetDomain(domain). - SetMembers(mdb.Spec.Members). + SetMembers(mdb.MembersThisReconciliation()). SetPreviousAutomationConfig(currentAc). SetMongoDBVersion(mdb.Spec.Version). SetFCV(mdb.GetFCV()). 
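
The hunk above swaps mdb.Spec.Members for mdb.MembersThisReconciliation() when building the automation config, which is the heart of the 4.4 scaling fix: the replica set is only ever reconfigured one member away from its current size. Below is a standalone sketch of that arithmetic, not part of the patch; fakeScaler is a hypothetical stand-in for the MongoDB resource, and replicasThisReconciliation mirrors the behaviour of scale.ReplicasThisReconciliation from this series:

package main

import "fmt"

// fakeScaler is a hypothetical stand-in for the MongoDB resource; it exposes
// the same two values the scale.ReplicaSetScaler interface requires.
type fakeScaler struct{ current, desired int }

func (f fakeScaler) DesiredReplicaSetMembers() int { return f.desired }
func (f fakeScaler) CurrentReplicaSetMembers() int { return f.current }

// replicasThisReconciliation mirrors scale.ReplicasThisReconciliation: jump
// straight to the target for a brand new deployment (current == 0), otherwise
// move exactly one member per reconciliation.
func replicasThisReconciliation(s fakeScaler) int {
	if s.CurrentReplicaSetMembers() == 0 || s.CurrentReplicaSetMembers() == s.DesiredReplicaSetMembers() {
		return s.DesiredReplicaSetMembers()
	}
	if s.DesiredReplicaSetMembers() < s.CurrentReplicaSetMembers() {
		return s.CurrentReplicaSetMembers() - 1
	}
	return s.CurrentReplicaSetMembers() + 1
}

func main() {
	// scaling down from 5 members to 3 takes two reconciliations: 5 -> 4 -> 3;
	// the controller requeues between steps until the StatefulSet is ready.
	s := fakeScaler{current: 5, desired: 3}
	for s.current != s.desired {
		s.current = replicasThisReconciliation(s)
		fmt.Println("members this reconciliation:", s.current)
	}
}

Running the sketch prints 4 and then 3: each reconciliation moves the member count a single step, matching the one-member-at-a-time constraint this series is built around.
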
@@ -518,17 +569,17 @@ func (r ReplicaSetReconciler) buildAutomationConfigSecret(mdb mdbv1.MongoDB) (co authModification, err := scram.EnsureScram(r.client, mdb.ScramCredentialsNamespacedName(), mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not ensure scram credentials: %s", err) } tlsModification, err := getTLSConfigModification(r.client, mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not configure TLS modification: %s", err) } currentAC, err := getCurrentAutomationConfig(r.client, mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not read existing automation config: %s", err) } ac, err := buildAutomationConfig( @@ -539,11 +590,11 @@ func (r ReplicaSetReconciler) buildAutomationConfigSecret(mdb mdbv1.MongoDB) (co tlsModification, ) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, fmt.Errorf("could not build automation config: %s", err) } acBytes, err := json.Marshal(ac) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, fmt.Errorf("could not marshal automation config: %s", err) } return secret.Builder(). @@ -589,7 +640,7 @@ func isChangingVersion(mdb mdbv1.MongoDB) bool { return false } -func mongodbAgentContainer(volumeMounts []corev1.VolumeMount) container.Modification { +func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount) container.Modification { return container.Apply( container.WithName(agentName), container.WithImage(os.Getenv(agentImageEnv)), @@ -608,6 +659,23 @@ func mongodbAgentContainer(volumeMounts []corev1.VolumeMount) container.Modifica }, ), container.WithEnvs( + corev1.EnvVar{ + Name: headlessAgentEnv, + Value: "true", + }, + corev1.EnvVar{ + Name: podNamespaceEnv, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + corev1.EnvVar{ + Name: automationConfigEnv, + Value: automationConfigSecretName, + }, corev1.EnvVar{ Name: agentHealthStatusFilePathEnv, Value: agentHealthStatusFilePathValue, @@ -694,7 +762,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(mdb.Spec.Members), + statefulset.WithReplicas(mdb.MembersThisReconciliation()), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( @@ -704,7 +772,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific podtemplatespec.WithVolume(hooksVolume), podtemplatespec.WithVolume(automationConfigVolume), podtemplatespec.WithServiceAccount(operatorServiceAccountName), - podtemplatespec.WithContainer(agentName, mongodbAgentContainer([]corev1.VolumeMount{agentHealthStatusVolumeMount, automationConfigVolumeMount, dataVolume})), + podtemplatespec.WithContainer(agentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), []corev1.VolumeMount{agentHealthStatusVolumeMount, automationConfigVolumeMount, dataVolume})), podtemplatespec.WithContainer(mongodbName, mongodbContainer(mdb.Spec.Version, []corev1.VolumeMount{mongodHealthStatusVolumeMount, dataVolume, hooksVolumeMount})), podtemplatespec.WithInitContainer(versionUpgradeHookName, 
versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount})), buildTLSPodSpecModification(mdb), @@ -733,7 +801,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(240), + probes.WithFailureThreshold(10), probes.WithInitialDelaySeconds(5), ) } diff --git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go index db989cd57..f07d43822 100644 --- a/pkg/util/scale/scale.go +++ b/pkg/util/scale/scale.go @@ -16,7 +16,7 @@ func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int { return replicaSetScaler.DesiredReplicaSetMembers() } - if isScalingDown(replicaSetScaler) { + if IsScalingDown(replicaSetScaler) { return replicaSetScaler.CurrentReplicaSetMembers() - 1 } @@ -28,7 +28,7 @@ func IsStillScaling(replicaSetScaler ReplicaSetScaler) bool { return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicaSetMembers() } -func isScalingDown(replicaSetScaler ReplicaSetScaler) bool { +func IsScalingDown(replicaSetScaler ReplicaSetScaler) bool { return replicaSetScaler.DesiredReplicaSetMembers() < replicaSetScaler.CurrentReplicaSetMembers() } From 3fbe0bd56f109ab5c23d89051fc8a47eab8116a2 Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 10 Sep 2020 17:02:50 +0100 Subject: [PATCH 11/39] renamed to replicas --- pkg/apis/mongodb/v1/mongodb_types.go | 6 +++--- pkg/controller/mongodb/replica_set_controller.go | 10 +++++----- pkg/util/scale/scale.go | 16 ++++++++-------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index 9dc39b46f..cce57c529 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -220,15 +220,15 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } -func (m MongoDB) DesiredReplicaSetMembers() int { +func (m MongoDB) DesiredReplicas() int { return m.Spec.Members } -func (m MongoDB) CurrentReplicaSetMembers() int { +func (m MongoDB) CurrentReplicas() int { return m.Status.Members } -func (m *MongoDB) MembersThisReconciliation() int { +func (m *MongoDB) ReplicasThisReconciliation() int { return scale.ReplicasThisReconciliation(m) } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index ebb3f9f2c..bd5e20518 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -177,8 +177,8 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R r.log = zap.S().With("ReplicaSet", request.NamespacedName) r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status, - "desiredMembers", mdb.DesiredReplicaSetMembers(), - "currentMembers", mdb.CurrentReplicaSetMembers(), + "desiredMembers", mdb.DesiredReplicas(), + "currentMembers", mdb.CurrentReplicas(), ) if err := r.ensureAutomationConfig(mdb); err != nil { @@ -295,15 +295,15 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if scale.IsStillScaling(mdb) { return status.Update(r.client.Status(), &mdb, statusOptions(). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", - mdb.CurrentReplicaSetMembers(), mdb.DesiredReplicaSetMembers())). - withMembers(mdb.MembersThisReconciliation()). + mdb.CurrentReplicas(), mdb.DesiredReplicas())). 
+ withMembers(mdb.ReplicasThisReconciliation()). withPendingPhase(0), ) } res, err := status.Update(r.client.Status(), &mdb, statusOptions(). withMongoURI(mdb.MongoURI()). - withMembers(mdb.MembersThisReconciliation()). + withMembers(mdb.ReplicasThisReconciliation()). withMessage(None, ""). withRunningPhase(), ) diff --git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go index db989cd57..40a957a6b 100644 --- a/pkg/util/scale/scale.go +++ b/pkg/util/scale/scale.go @@ -3,8 +3,8 @@ package scale // ReplicaSetScaler is an interface which is able to scale up and down a replicaset // a single member at a time type ReplicaSetScaler interface { - DesiredReplicaSetMembers() int - CurrentReplicaSetMembers() int + DesiredReplicas() int + CurrentReplicas() int } // ReplicasThisReconciliation returns the number of replicas that should be configured @@ -12,24 +12,24 @@ type ReplicaSetScaler interface { func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int { // the current replica set members will be 0 when we are creating a new deployment // if this is the case, we want to jump straight to the desired members and not make changes incrementally - if replicaSetScaler.CurrentReplicaSetMembers() == 0 || replicaSetScaler.CurrentReplicaSetMembers() == replicaSetScaler.DesiredReplicaSetMembers() { - return replicaSetScaler.DesiredReplicaSetMembers() + if replicaSetScaler.CurrentReplicas() == 0 || replicaSetScaler.CurrentReplicas() == replicaSetScaler.DesiredReplicas() { + return replicaSetScaler.DesiredReplicas() } if isScalingDown(replicaSetScaler) { - return replicaSetScaler.CurrentReplicaSetMembers() - 1 + return replicaSetScaler.CurrentReplicas() - 1 } - return replicaSetScaler.CurrentReplicaSetMembers() + 1 + return replicaSetScaler.CurrentReplicas() + 1 } func IsStillScaling(replicaSetScaler ReplicaSetScaler) bool { - return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicaSetMembers() + return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicas() } func isScalingDown(replicaSetScaler ReplicaSetScaler) bool { - return replicaSetScaler.DesiredReplicaSetMembers() < replicaSetScaler.CurrentReplicaSetMembers() + return replicaSetScaler.DesiredReplicas() < replicaSetScaler.CurrentReplicas() } // AnyAreStillScaling reports true if any of one the provided members is still scaling From 71d7f4bf3168d7e55c0fa0d80420a26155b545b4 Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 10:26:35 +0100 Subject: [PATCH 12/39] increased test timeout --- cmd/testrunner/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go index 21b1d113a..192a2c707 100644 --- a/cmd/testrunner/main.go +++ b/cmd/testrunner/main.go @@ -271,7 +271,7 @@ func withTest(test string) func(obj runtime.Object) { "--kubeconfig", "/etc/config/kubeconfig", "--go-test-flags", - "-timeout=20m", + "-timeout=40m", } } } From 531394e9eada4feb0d193eb20e458a692be255da Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 10:49:44 +0100 Subject: [PATCH 13/39] updated scaling test with correct automation config versions --- pkg/controller/mongodb/replica_set_controller.go | 8 -------- test/e2e/replica_set_scale/replica_set_scaling_test.go | 4 ++-- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 13dddaed0..dc6b42b2a 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ 
b/pkg/controller/mongodb/replica_set_controller.go @@ -323,14 +323,6 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R withFailedPhase(), ) } - // - //r.log.Debug("Updating MongoDB Status") - //if err := r.client.Get(context.TODO(), mdb.NamespacedName(), &mdb); err != nil { - // return status.Update(r.client.Status(), &mdb, statusOptions(). - // withMessage(Error, fmt.Sprintf("could not get get resource: %s", err)). - // withFailedPhase(), - // ) - //} if scale.IsStillScaling(mdb) { return status.Update(r.client.Status(), &mdb, statusOptions(). diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index 514f5c14c..30d079502 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -46,7 +46,7 @@ func TestReplicaSetScale(t *testing.T) { t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb, 5)) t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetIsReady(&mdb)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 2)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, mdbv1.MongoDBStatus{ MongoURI: mdb.MongoURI(), @@ -56,7 +56,7 @@ func TestReplicaSetScale(t *testing.T) { t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, mdbv1.MongoDBStatus{ MongoURI: mdb.MongoURI(), From cc4033ca2e147c698167c95028683b45642830c2 Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 11:11:26 +0100 Subject: [PATCH 14/39] wip --- pkg/controller/mongodb/mongodb_status_options.go | 4 ++++ pkg/controller/mongodb/replica_set_controller.go | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index a2e5085fc..eaba9fe4f 100644 --- a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -16,6 +16,7 @@ type severity string const ( Info severity = "INFO" + Debug severity = "DEBUG" Warn severity = "WARN" Error severity = "ERROR" None severity = "NONE" @@ -107,6 +108,9 @@ func (m messageOption) ApplyOption(mdb *mdbv1.MongoDB) { if m.message.severityLevel == Info { zap.S().Info(m.message.messageString) } + if m.message.severityLevel == Debug { + zap.S().Debug(m.message.messageString) + } } func (m messageOption) GetResult() (reconcile.Result, error) { diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 01cafb9d2..b46de4a95 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -204,13 +204,15 @@ func (r *ReplicaSetReconciler) Reconcile(request 
reconcile.Request) (reconcile.R
 	// if we're scaling down, we need to wait until the StatefulSet has the expected number of ready replicas before continuing with the reconciliation
 	if scale.IsScalingDown(mdb) {
 		isAtDesiredReplicaCount, err := r.isExpectedNumberOfStatefulSetReplicasReady(mdb)
-		r.log.Infow("Replica Set is scaling", "isAtDesiredReplicaCount", isAtDesiredReplicaCount)
 		if err != nil {
 			r.log.Errorf("error: %s", err)
 			return reconcile.Result{}, err
 		}
 
 		if !isAtDesiredReplicaCount {
+			r.log.Infow("not yet at the desired number of replicas, requeuing reconciliation",
+				"currentReplicas", mdb.CurrentReplicas(), "desiredReplicas", mdb.DesiredReplicas())
+
 			return status.Update(r.client.Status(), &mdb, statusOptions().
 				withMessage(Info, fmt.Sprintf("Replica Set is scaling down, currentMembers=%d, desiredMembers=%d",
 					mdb.CurrentReplicas(), mdb.DesiredReplicas())).
@@ -249,7 +251,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R
 	if !readyToUpdateSts {
 		return status.Update(r.client.Status(), &mdb,
 			statusOptions().
-				withMessage(Info, fmt.Sprintf("Not ready to update MongoDB resource %s", mdb.NamespacedName())).
+				withMessage(Debug, fmt.Sprintf("Not ready to update StatefulSet: %s", mdb.NamespacedName())).
 				withPendingPhase(5),
 		)
 	}

From 7fefc7481ccb863d1c7c10484c08b33d95e68e5a Mon Sep 17 00:00:00 2001
From: chatton
Date: Fri, 11 Sep 2020 11:14:58 +0100
Subject: [PATCH 15/39] configure operator to use headless mode configuration for readiness probe

---
 .../mongodb/build_statefulset_test.go     |  2 +-
 .../mongodb/replica_set_controller.go     | 42 ++++++++++++++-----
 .../mongodb/replicaset_controller_test.go |  2 +-
 3 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/pkg/controller/mongodb/build_statefulset_test.go b/pkg/controller/mongodb/build_statefulset_test.go
index e02ae61b0..bc7cd81ca 100644
--- a/pkg/controller/mongodb/build_statefulset_test.go
+++ b/pkg/controller/mongodb/build_statefulset_test.go
@@ -47,7 +47,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDB, sts *app
 	assert.Equal(t, mdb.Namespace, sts.Namespace)
 	assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type)
 	assert.Equal(t, operatorServiceAccountName, sts.Spec.Template.Spec.ServiceAccountName)
-	assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 1)
+	assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 4)
 	assert.Len(t, sts.Spec.Template.Spec.Containers[1].Env, 1)
 
 	agentContainer := sts.Spec.Template.Spec.Containers[0]
diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go
index bd5e20518..00c6c0dd9 100644
--- a/pkg/controller/mongodb/replica_set_controller.go
+++ b/pkg/controller/mongodb/replica_set_controller.go
@@ -62,15 +62,18 @@ const (
 	mongodbImageEnv        = "MONGODB_IMAGE"
 	mongodbRepoUrl         = "MONGODB_REPO_URL"
 	mongodbToolsVersionEnv = "MONGODB_TOOLS_VERSION"
+	headlessAgentEnv       = "HEADLESS_AGENT"
+	podNamespaceEnv        = "POD_NAMESPACE"
+	automationConfigEnv    = "AUTOMATION_CONFIG_MAP"
 
-	AutomationConfigKey            = "automation-config"
+	AutomationConfigKey            = "cluster-config.json"
 	agentName                      = "mongodb-agent"
 	mongodbName                    = "mongod"
 	versionUpgradeHookName         = "mongod-posthook"
 	dataVolumeName                 = "data-volume"
 	versionManifestFilePath        = "/usr/local/version_manifest.json"
 	readinessProbePath             = "/var/lib/mongodb-mms-automation/probes/readinessprobe"
-	clusterFilePath                = "/var/lib/automation/config/automation-config"
+	clusterFilePath                = "/var/lib/automation/config/cluster-config.json"
 	operatorServiceAccountName     = "mongodb-kubernetes-operator"
 
 	agentHealthStatusFilePathValue = 
"/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json" @@ -184,7 +187,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.ensureAutomationConfig(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Error, fmt.Sprintf("error creating automation config config map: %s", err)). + withMessage(Error, fmt.Sprintf("error creating automation config secret: %s", err)). withFailedPhase(), ) } @@ -518,17 +521,17 @@ func (r ReplicaSetReconciler) buildAutomationConfigSecret(mdb mdbv1.MongoDB) (co authModification, err := scram.EnsureScram(r.client, mdb.ScramCredentialsNamespacedName(), mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not ensure scram credentials: %s", err) } tlsModification, err := getTLSConfigModification(r.client, mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not configure TLS modification: %s", err) } currentAC, err := getCurrentAutomationConfig(r.client, mdb) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, errors.Errorf("could not read existing automation config: %s", err) } ac, err := buildAutomationConfig( @@ -539,11 +542,11 @@ func (r ReplicaSetReconciler) buildAutomationConfigSecret(mdb mdbv1.MongoDB) (co tlsModification, ) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, fmt.Errorf("could not build automation config: %s", err) } acBytes, err := json.Marshal(ac) if err != nil { - return corev1.Secret{}, err + return corev1.Secret{}, fmt.Errorf("could not marshal automation config: %s", err) } return secret.Builder(). @@ -589,7 +592,7 @@ func isChangingVersion(mdb mdbv1.MongoDB) bool { return false } -func mongodbAgentContainer(volumeMounts []corev1.VolumeMount) container.Modification { +func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount) container.Modification { return container.Apply( container.WithName(agentName), container.WithImage(os.Getenv(agentImageEnv)), @@ -608,6 +611,23 @@ func mongodbAgentContainer(volumeMounts []corev1.VolumeMount) container.Modifica }, ), container.WithEnvs( + corev1.EnvVar{ + Name: headlessAgentEnv, + Value: "true", + }, + corev1.EnvVar{ + Name: podNamespaceEnv, + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + corev1.EnvVar{ + Name: automationConfigEnv, + Value: automationConfigSecretName, + }, corev1.EnvVar{ Name: agentHealthStatusFilePathEnv, Value: agentHealthStatusFilePathValue, @@ -694,7 +714,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(mdb.Spec.Members), + statefulset.WithReplicas(mdb.ReplicasThisReconciliation()), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( @@ -704,7 +724,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific podtemplatespec.WithVolume(hooksVolume), podtemplatespec.WithVolume(automationConfigVolume), podtemplatespec.WithServiceAccount(operatorServiceAccountName), - podtemplatespec.WithContainer(agentName, mongodbAgentContainer([]corev1.VolumeMount{agentHealthStatusVolumeMount, 
automationConfigVolumeMount, dataVolume})), + podtemplatespec.WithContainer(agentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), []corev1.VolumeMount{agentHealthStatusVolumeMount, automationConfigVolumeMount, dataVolume})), podtemplatespec.WithContainer(mongodbName, mongodbContainer(mdb.Spec.Version, []corev1.VolumeMount{mongodHealthStatusVolumeMount, dataVolume, hooksVolumeMount})), podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount})), buildTLSPodSpecModification(mdb), diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index 7d2b122de..e3f6d0f06 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -522,7 +522,7 @@ func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { func TestOpenshift_Configuration(t *testing.T) { sts := performReconciliationAndGetStatefulSet(t, "openshift_mdb.yaml") - assert.Equal(t, "MANAGED_SECURITY_CONTEXT", sts.Spec.Template.Spec.Containers[0].Env[1].Name) + assert.Equal(t, "MANAGED_SECURITY_CONTEXT", sts.Spec.Template.Spec.Containers[0].Env[3].Name) assert.Equal(t, "MANAGED_SECURITY_CONTEXT", sts.Spec.Template.Spec.Containers[1].Env[1].Name) } From b474a1db592ab816ae3041d16f76829b95ce12df Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 12:49:39 +0100 Subject: [PATCH 16/39] updated agent dockerfile --- agent/Dockerfile | 2 +- cmd/testrunner/main.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/agent/Dockerfile b/agent/Dockerfile index 98861316e..d7df0e3b5 100644 --- a/agent/Dockerfile +++ b/agent/Dockerfile @@ -21,7 +21,7 @@ RUN mkdir -p agent \ && rm -r mongodb-mms-automation-agent-* RUN mkdir -p /var/lib/mongodb-mms-automation/probes/ \ - && curl --retry 3 https://readinessprobe-test.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ + && curl --retry 3 https://readinessprobe.s3-us-west-1.amazonaws.com/readiness -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ && chmod +x /var/lib/mongodb-mms-automation/probes/readinessprobe \ && mkdir -p /var/log/mongodb-mms-automation/ \ && chmod -R +wr /var/log/mongodb-mms-automation/ \ diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go index 21b1d113a..6a3168f45 100644 --- a/cmd/testrunner/main.go +++ b/cmd/testrunner/main.go @@ -270,6 +270,7 @@ func withTest(test string) func(obj runtime.Object) { "--verbose", "--kubeconfig", "/etc/config/kubeconfig", + "--go-test-flags", "-timeout=20m", } From 741f9c8bf27db0ac4c648afc3ec0a9cf0efeb3ed Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 12:49:48 +0100 Subject: [PATCH 17/39] updated agent dockerfile --- cmd/testrunner/main.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go index 6a3168f45..21b1d113a 100644 --- a/cmd/testrunner/main.go +++ b/cmd/testrunner/main.go @@ -270,7 +270,6 @@ func withTest(test string) func(obj runtime.Object) { "--verbose", "--kubeconfig", "/etc/config/kubeconfig", - "--go-test-flags", "-timeout=20m", } From c63f8da8b76f6a429eb62a170c805e35f0bbcadc Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 14:29:34 +0100 Subject: [PATCH 18/39] publish not ready addresses --- pkg/controller/mongodb/replica_set_controller.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controller/mongodb/replica_set_controller.go 
b/pkg/controller/mongodb/replica_set_controller.go index 00c6c0dd9..7fb76a0df 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -494,6 +494,7 @@ func buildService(mdb mdbv1.MongoDB) corev1.Service { SetServiceType(corev1.ServiceTypeClusterIP). SetClusterIP("None"). SetPort(27017). + SetPublishNotReadyAddresses(true). Build() } From 9fb410501b15bf290730507d8608b1308b339113 Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 11 Sep 2020 15:03:07 +0100 Subject: [PATCH 19/39] bumped failure threshold to 30 --- pkg/controller/mongodb/replica_set_controller.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index aa9e21609..cca4fb343 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -181,6 +181,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R "currentMembers", mdb.CurrentReplicas(), ) + r.log.Debug("Ensuring Automation Config for deployment") if err := r.ensureAutomationConfig(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). @@ -218,6 +219,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R } } + r.log.Debug("Validating TLS Config") isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { return status.Update(r.client.Status(), &mdb, @@ -756,6 +758,8 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific dataVolume := statefulset.CreateVolumeMount(dataVolumeName, "/data") + zap.S().Debugw("BuildingStatefulSet", "replicas", mdb.ReplicasThisReconciliation()) + return statefulset.Apply( statefulset.WithName(mdb.Name), statefulset.WithNamespace(mdb.Namespace), @@ -802,7 +806,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(10), + probes.WithFailureThreshold(30), probes.WithInitialDelaySeconds(5), ) } From 13ab6017fe48bdbf0cc5ebdd3d08b04d216b10bd Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 14 Sep 2020 16:13:11 +0100 Subject: [PATCH 20/39] updated python scripts to use correct namespace --- scripts/dev/dump_diagnostic.py | 4 +++- scripts/dev/e2e.py | 7 ++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/dev/dump_diagnostic.py b/scripts/dev/dump_diagnostic.py index c426cc7e3..7f79a927b 100644 --- a/scripts/dev/dump_diagnostic.py +++ b/scripts/dev/dump_diagnostic.py @@ -94,7 +94,9 @@ def dump_automation_configs(namespace: str) -> None: return for mdb in mongodb_resources: name = mdb["metadata"]["name"] - dump_secret_keys_namespaced(namespace, ["automation-config"], f"{name}-config") + dump_secret_keys_namespaced( + namespace, ["cluster-config.json"], f"{name}-config" + ) def dump_all(namespace: str) -> None: diff --git a/scripts/dev/e2e.py b/scripts/dev/e2e.py index 7b5d9cf7d..f52be5219 100644 --- a/scripts/dev/e2e.py +++ b/scripts/dev/e2e.py @@ -78,11 +78,12 @@ def _prepare_testrunner_environment(config_file: str) -> None: ) -def create_kube_config() -> None: +def create_kube_config(config_file: str) -> None: """Replicates the local kubeconfig file (pointed at by KUBECONFIG), as a ConfigMap.""" corev1 = client.CoreV1Api() print("Creating kube-config ConfigMap") + dev_config = load_config(config_file) svc = 
corev1.read_namespaced_service("kubernetes", "default") @@ -104,7 +105,7 @@ def create_kube_config() -> None: ) k8s_conditions.ignore_if_already_exists( - lambda: corev1.create_namespaced_config_map("default", config_map) + lambda: corev1.create_namespaced_config_map(dev_config.namespace, config_map) ) @@ -318,7 +319,7 @@ def main() -> int: config.load_kube_config() dev_config = load_config(args.config_file) - create_kube_config() + create_kube_config(args.config_file) try: build_and_push_images(args, dev_config) From a8ca5322a0677e1f52859779f8122f5024e9fcde Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 11:18:48 +0100 Subject: [PATCH 21/39] fixed ready replicas --- .dockerignore | 1 - README.md | 2 +- build/Dockerfile | 8 ++++++++ deploy/{ => openshift}/operator_openshift.yaml | 0 pkg/controller/mongodb/replica_set_controller.go | 3 ++- scripts/dev/e2e.py | 2 -- 6 files changed, 11 insertions(+), 5 deletions(-) rename deploy/{ => openshift}/operator_openshift.yaml (100%) diff --git a/.dockerignore b/.dockerignore index f8f340043..5ba6cada8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,7 +1,6 @@ .github .idea agent -build/_output zz_* vendor/ scripts/ diff --git a/README.md b/README.md index 93e58d7c0..0c7ce85a0 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ If you want to deploy the operator on OpenShift you will have to provide the env See [here](deploy/crds/mongodb.com_v1_mongodb_openshift_cr.yaml) for an example of how to provide the required configuration for a MongoDB ReplicaSet. -See [here](deploy/operator_openshift.yaml) for an example of how to configure the Operator deployment. +See [here](deploy/openshift/operator_openshift.yaml) for an example of how to configure the Operator deployment. #### Example diff --git a/build/Dockerfile b/build/Dockerfile index ac45631e1..91290f027 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -4,6 +4,14 @@ ENV OPERATOR=/usr/local/bin/mongodb-kubernetes-operator \ USER_UID=1001 \ USER_NAME=mongodb-kubernetes-operator +RUN apt-get update && \ + apt-get install -y curl + +ENV manifest_version=4.4 +RUN mkdir -p /content/ \ + && chmod -R +r /content/ \ + && curl --fail --retry 3 -o /usr/local/version_manifest.json "https://opsmanager.mongodb.com/static/version_manifest/${manifest_version}.json" + # install operator binary COPY build/_output/bin/mongodb-kubernetes-operator ${OPERATOR} diff --git a/deploy/operator_openshift.yaml b/deploy/openshift/operator_openshift.yaml similarity index 100% rename from deploy/operator_openshift.yaml rename to deploy/openshift/operator_openshift.yaml diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index b0bdb3312..31b5d3bb6 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -252,6 +252,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if !ready { return status.Update(r.client.Status(), &mdb, statusOptions(). + withMembers(int(currentSts.Status.ReadyReplicas)). withMessage(Info, fmt.Sprintf("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name)). 
withPendingPhase(10), ) @@ -359,7 +360,7 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := statefulset.IsReady(*existingStatefulSet, mdb.Spec.Members) + isReady := statefulset.IsReady(*existingStatefulSet, mdb.ReplicasThisReconciliation()) if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ diff --git a/scripts/dev/e2e.py b/scripts/dev/e2e.py index f52be5219..679373898 100644 --- a/scripts/dev/e2e.py +++ b/scripts/dev/e2e.py @@ -7,14 +7,12 @@ load_yaml_from_file, ) import k8s_conditions -import k8s_request_data import dump_diagnostic from dockerutil import build_and_push_image from typing import Dict from dev_config import load_config, DevConfig from kubernetes import client, config import argparse -import time import os import sys import yaml From 76aeef6a52c8c37074ff5f372d3cb52cdf3bd76a Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 12:50:29 +0100 Subject: [PATCH 22/39] fixed unit tests --- pkg/controller/mongodb/replicaset_controller_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index e3f6d0f06..bfa373045 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -432,6 +432,8 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, mdb.Status.Members) + makeStatefulSetReady(t, mgr.GetClient(), mdb) + res, err = r.Reconcile(reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) @@ -472,6 +474,8 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. 
assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, mdb.Status.Members) + makeStatefulSetReady(t, mgr.GetClient(), mdb) + res, err = r.Reconcile(reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) @@ -631,8 +635,8 @@ func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { sts := appsv1.StatefulSet{} err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) assert.NoError(t, err) - sts.Status.ReadyReplicas = int32(mdb.Spec.Members) - sts.Status.UpdatedReplicas = int32(mdb.Spec.Members) + sts.Status.ReadyReplicas = int32(mdb.ReplicasThisReconciliation()) + sts.Status.UpdatedReplicas = int32(mdb.ReplicasThisReconciliation()) err = c.Update(context.TODO(), &sts) assert.NoError(t, err) } From fadd770683eb0132ee73f485f6c3bdfffa8e2f67 Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 15:49:00 +0100 Subject: [PATCH 23/39] set replicas back to spec.Members --- pkg/controller/mongodb/replica_set_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 31b5d3bb6..c68a6eec8 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -717,7 +717,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(mdb.ReplicasThisReconciliation()), + statefulset.WithReplicas(mdb.Spec.Members), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( From be838db56df6ed3de7e92286a9441f30e90fbd84 Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 16:24:57 +0100 Subject: [PATCH 24/39] typos --- pkg/controller/mongodb/replica_set_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index adbdb64f3..5e23bcb95 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -455,7 +455,7 @@ func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDB) erro return nil } -// setAnnotations updates the monogdb resource annotations by applying the provided annotations +// setAnnotations updates the mongodb resource annotations by applying the provided annotations // on top of the existing ones func (r ReplicaSetReconciler) setAnnotations(nsName types.NamespacedName, annotations map[string]string) error { mdb := mdbv1.MongoDB{} @@ -534,7 +534,7 @@ func versionManifestFromBytes(bytes []byte) (automationconfig.VersionManifest, e // buildService creates a Service that will be used for the Replica Set StatefulSet // that allows all the members of the STS to see each other. -// TODO: Make sure this Service is as minimal as posible, to not interfere with +// TODO: Make sure this Service is as minimal as possible, to not interfere with // future implementations and Service Discovery mechanisms we might implement. 
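
Since buildService, in the surrounding hunk, is what lets replica set members find one another, it is worth noting the shape it produces: a headless Service that, after the earlier "publish not ready addresses" patch in this series, also keeps not-yet-ready pods resolvable over DNS while the set is forming or scaling. A rough sketch in plain client-go types follows; it is an approximation rather than the operator's own service builder, and the "-svc" name suffix is an assumption for illustration:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// headlessService approximates what buildService returns for a MongoDB
// resource: a selector-based headless Service on the mongod port.
func headlessService(name, namespace string, label map[string]string) corev1.Service {
	return corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name + "-svc", Namespace: namespace, Labels: label},
		Spec: corev1.ServiceSpec{
			Selector: label,
			Type:     corev1.ServiceTypeClusterIP,
			// headless: no virtual IP, each pod gets a stable DNS entry
			ClusterIP: "None",
			// keep not-ready pods in DNS so replica set members can still
			// resolve each other during startup and scaling
			PublishNotReadyAddresses: true,
			Ports:                    []corev1.ServicePort{{Port: 27017}},
		},
	}
}
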
func buildService(mdb mdbv1.MongoDB) corev1.Service { label := make(map[string]string) From eaceb222fa208a6da85a6e2374538dbf305ab7f3 Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 16:34:02 +0100 Subject: [PATCH 25/39] fix unit tests --- pkg/controller/mongodb/build_statefulset_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/mongodb/build_statefulset_test.go b/pkg/controller/mongodb/build_statefulset_test.go index 0b2ae6bc7..83d151ec0 100644 --- a/pkg/controller/mongodb/build_statefulset_test.go +++ b/pkg/controller/mongodb/build_statefulset_test.go @@ -54,7 +54,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDB, sts *app assert.Equal(t, "agent-image", agentContainer.Image) probe := agentContainer.ReadinessProbe assert.True(t, reflect.DeepEqual(probes.New(defaultReadiness()), *probe)) - assert.Equal(t, int32(10), probe.FailureThreshold) + assert.Equal(t, probes.New(defaultReadiness()).FailureThreshold, probe.FailureThreshold) assert.Equal(t, int32(5), probe.InitialDelaySeconds) assert.Len(t, agentContainer.VolumeMounts, 4) From 0055b250caf5bb0e89d3b5b5c2e9189715bc7a6f Mon Sep 17 00:00:00 2001 From: chatton Date: Tue, 15 Sep 2020 18:03:37 +0100 Subject: [PATCH 26/39] wip --- .../mongodb/replica_set_controller.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 5e23bcb95..e5fcfba1e 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -212,10 +212,11 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R r.log.Infow("not yet at the desired number of replicas, requeuing reconciliation", "currentReplicas", mdb.CurrentReplicas(), "desiredReplicas", mdb.DesiredReplicas()) - return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Info, fmt.Sprintf("Replica Set is scaling down, currentMembers=%d, desiredMembers=%d", - mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withPendingPhase(10), + return status.Update(r.client.Status(), &mdb, + statusOptions(). + withMessage(Info, fmt.Sprintf("Replica Set is scaling down, currentMembers=%d, desiredMembers=%d", + mdb.CurrentReplicas(), mdb.DesiredReplicas())). 
+ withPendingPhase(10), ) } } @@ -760,7 +761,12 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific dataVolume := statefulset.CreateVolumeMount(dataVolumeName, "/data") - zap.S().Debugw("BuildingStatefulSet", "replicas", mdb.ReplicasThisReconciliation()) + replicas := mdb.Spec.Members + if scale.IsScalingDown(mdb) { + replicas = mdb.ReplicasThisReconciliation() + } + + zap.S().Debugw("BuildingStatefulSet", "replicas", replicas) return statefulset.Apply( statefulset.WithName(mdb.Name), @@ -769,7 +775,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(mdb.Spec.Members), + statefulset.WithReplicas(replicas), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( From 3cee13aca55f0e243fbab8488f5d61e72b6d9bb3 Mon Sep 17 00:00:00 2001 From: chatton Date: Wed, 16 Sep 2020 15:20:04 +0100 Subject: [PATCH 27/39] split members status field into 2 different fields, update status during reconciliation to keep track of the value correctly --- pkg/apis/mongodb/v1/mongodb_types.go | 30 ++++++- .../mongodb/mongodb_status_options.go | 57 ++++++++----- .../mongodb/mongodb_status_options_test.go | 13 --- .../mongodb/replica_set_controller.go | 80 +++++++++++-------- .../mongodb/replicaset_controller_test.go | 19 ++--- test/e2e/mongodbtests/mongodbtests.go | 7 +- .../replica_set_multiple_test.go | 14 ++-- .../replica_set_readiness_probe_test.go | 7 +- .../replica_set_scaling_test.go | 14 ++-- 9 files changed, 143 insertions(+), 98 deletions(-) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index 493b77d0b..de82e37ad 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -201,8 +201,11 @@ type AuthMode string type MongoDBStatus struct { MongoURI string `json:"mongoUri"` Phase Phase `json:"phase"` - Members int `json:"members"` - Message string `json:"message,omitempty"` + + CurrentStatefulSetReplicas int `json:"currentStatefulSetReplicas"` + CurrentReplicaSetMembers int `json:"currentReplicaSetMembers"` + + Message string `json:"message,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -225,13 +228,32 @@ func (m MongoDB) DesiredReplicas() int { } func (m MongoDB) CurrentReplicas() int { - return m.Status.Members + return m.Status.CurrentStatefulSetReplicas } -func (m *MongoDB) ReplicasThisReconciliation() int { +func (m *MongoDB) StatefulSetReplicasThisReconciliation() int { return scale.ReplicasThisReconciliation(m) } +type intScaler struct { + current, desired int +} + +func (a intScaler) DesiredReplicas() int { + return a.desired +} + +func (a intScaler) CurrentReplicas() int { + return a.current +} + +func (m MongoDB) AutomationConfigMembersThisReconciliation() int { + return scale.ReplicasThisReconciliation(intScaler{ + desired: m.Spec.Members, + current: m.Status.CurrentReplicaSetMembers, + }) +} + // MongoURI returns a mongo uri which can be used to connect to this deployment func (m MongoDB) MongoURI() string { members := make([]string, m.Spec.Members) diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index eaba9fe4f..90ee13ef9 100644 --- 
a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -60,25 +60,6 @@ func (m mongoUriOption) GetResult() (reconcile.Result, error) { return okResult() } -func (o *optionBuilder) withMembers(members int) *optionBuilder { - o.options = append(o.options, - membersOption{ - members: members, - }) - return o -} - -type membersOption struct { - members int -} - -func (m membersOption) ApplyOption(mdb *mdbv1.MongoDB) { - mdb.Status.Members = m.members -} - -func (m membersOption) GetResult() (reconcile.Result, error) { - return okResult() -} func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { o.options = append(o.options, phaseOption{ @@ -117,6 +98,20 @@ func (m messageOption) GetResult() (reconcile.Result, error) { return okResult() } +func (o *optionBuilder) withAutomationConfigMembers(members int) *optionBuilder { + o.options = append(o.options, automationConfigReplicasOption{ + replicaSetMembers: members, + }) + return o +} + +func (o *optionBuilder) withStatefulSetReplicas(members int) *optionBuilder { + o.options = append(o.options, statefulSetReplicasOption{ + replicas: members, + }) + return o +} + func (o *optionBuilder) withMessage(severityLevel severity, msg string) *optionBuilder { o.options = append(o.options, messageOption{ message: message{ @@ -161,6 +156,30 @@ func (p phaseOption) GetResult() (reconcile.Result, error) { return okResult() } +type automationConfigReplicasOption struct { + replicaSetMembers int +} + +func (a automationConfigReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { + mdb.Status.CurrentReplicaSetMembers = a.replicaSetMembers +} + +func (a automationConfigReplicasOption) GetResult() (reconcile.Result, error) { + return okResult() +} + +type statefulSetReplicasOption struct { + replicas int +} + +func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { + mdb.Status.CurrentStatefulSetReplicas = s.replicas +} + +func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { + return okResult() +} + // helper functions which return reconciliation results which should be // returned from the main reconciliation loop diff --git a/pkg/controller/mongodb/mongodb_status_options_test.go b/pkg/controller/mongodb/mongodb_status_options_test.go index 757732159..f55ba0518 100644 --- a/pkg/controller/mongodb/mongodb_status_options_test.go +++ b/pkg/controller/mongodb/mongodb_status_options_test.go @@ -22,19 +22,6 @@ func TestMongoUriOption_ApplyOption(t *testing.T) { assert.Equal(t, "my-uri", mdb.Status.MongoURI, "Status should be updated") } -func TestMembersOption_ApplyOption(t *testing.T) { - mdb := newReplicaSet(3, "my-rs", "my-ns") - - opt := membersOption{ - members: 5, - } - - opt.ApplyOption(&mdb) - - assert.Equal(t, 3, mdb.Spec.Members, "Spec should remain unchanged") - assert.Equal(t, 5, mdb.Status.Members, "Status should be updated") -} - func TestOptionBuilder_RunningPhase(t *testing.T) { mdb := newReplicaSet(3, "my-rs", "my-ns") diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index e5fcfba1e..992427b50 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -221,6 +221,20 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R } } + // at this stage we know we have successfully updated the automation config with the correct number of + // members and the stateful set has the expected number of 
ready replicas. We can update our status + // so we calculate these fields correctly going forward + _, err = status.Update(r.client.Status(), &mdb, + statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()), + ) + + if err != nil { + r.log.Errorf("error updating status: %s", err) + return reconcile.Result{}, err + } + r.log.Debug("Validating TLS Config") isTLSValid, err := r.validateTLSConfig(mdb) if err != nil { @@ -244,9 +258,9 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R } readyToUpdateSts := true - if mdbSts.Status.Replicas > int32(mdb.ReplicasThisReconciliation()) { - readyToUpdateSts = mdbSts.Status.ReadyReplicas == int32(mdb.ReplicasThisReconciliation()) - r.log.Debugw("ReplicaSet Scaling", "readyReplicas", mdbSts.Status.Replicas, "replicasThisReconciliation", mdb.ReplicasThisReconciliation()) + if mdbSts.Status.Replicas > int32(mdb.StatefulSetReplicasThisReconciliation()) { + readyToUpdateSts = mdbSts.Status.ReadyReplicas == int32(mdb.StatefulSetReplicasThisReconciliation()) + r.log.Debugw("ReplicaSet Scaling", "readyReplicas", mdbSts.Status.Replicas, "replicasThisReconciliation", mdb.StatefulSetReplicasThisReconciliation()) } if !readyToUpdateSts { @@ -272,18 +286,19 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if !apiErrors.IsNotFound(err) { errMsg = fmt.Sprintf("error getting StatefulSet: %s", err) } - return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Error, errMsg). - withFailedPhase(), + return status.Update(r.client.Status(), &mdb, + statusOptions(). + withMessage(Error, errMsg). + withFailedPhase(), ) - } r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", getUpdateStrategyType(mdb)) ready, err := r.isStatefulSetReady(mdb, ¤tSts) if err != nil { return status.Update(r.client.Status(), &mdb, - statusOptions().withMessage(Error, fmt.Sprintf("Error checking StatefulSet status: %s", err)). + statusOptions(). + withMessage(Error, fmt.Sprintf("Error checking StatefulSet status: %s", err)). withFailedPhase(), ) } @@ -291,7 +306,9 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if !ready { return status.Update(r.client.Status(), &mdb, statusOptions(). - withMembers(int(currentSts.Status.ReadyReplicas)). + // need to update the current replicas as they get ready so eventually the desired number becomes + // ready one at a time + withStatefulSetReplicas(int(currentSts.Status.ReadyReplicas)). withMessage(Info, fmt.Sprintf("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name)). withPendingPhase(10), ) @@ -301,6 +318,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). withFailedPhase(), ) @@ -315,6 +333,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error setting annotations: %s", err)). 
withFailedPhase(), ) @@ -323,6 +342,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.completeTLSRollout(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error completing TLS rollout: %s", err)). withFailedPhase(), ) @@ -330,27 +350,21 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if scale.IsStillScaling(mdb) { return status.Update(r.client.Status(), &mdb, statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withMembers(mdb.ReplicasThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). withPendingPhase(0), ) } - if scale.IsStillScaling(mdb) { - return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", - mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withMembers(mdb.ReplicasThisReconciliation()). - withPendingPhase(0), - ) - } - - res, err := status.Update(r.client.Status(), &mdb, statusOptions(). - withMongoURI(mdb.MongoURI()). - withMembers(mdb.ReplicasThisReconciliation()). - withMessage(None, ""). - withRunningPhase(), + res, err := status.Update(r.client.Status(), &mdb, + statusOptions(). + withMongoURI(mdb.MongoURI()). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). + withMessage(None, ""). + withRunningPhase(), ) if err != nil { @@ -400,7 +414,7 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := statefulset.IsReady(*existingStatefulSet, mdb.ReplicasThisReconciliation()) + isReady := statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ @@ -426,7 +440,7 @@ func (r *ReplicaSetReconciler) isExpectedNumberOfStatefulSetReplicasReady(mdb md if err != nil { return false, err } - desiredReadyReplicas := int32(mdb.ReplicasThisReconciliation()) + desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation()) r.log.Infow("isExpectedNumberOfStatefulSetReplicasReady", "desiredReadyReplicas", desiredReadyReplicas) return sts.Status.ReadyReplicas == desiredReadyReplicas, nil @@ -480,12 +494,13 @@ func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error { func buildAutomationConfig(mdb mdbv1.MongoDB, mdbVersionConfig automationconfig.MongoDbVersionConfig, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) { domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDNSName)) + zap.S().Debugw("AutomationConfigMembersThisReconciliation", "mdb.AutomationConfigMembersThisReconciliation()", mdb.AutomationConfigMembersThisReconciliation()) builder := automationconfig.NewBuilder(). 
SetTopology(automationconfig.ReplicaSetTopology). SetName(mdb.Name). SetDomain(domain). - SetMembers(mdb.ReplicasThisReconciliation()). + SetMembers(mdb.AutomationConfigMembersThisReconciliation()). SetPreviousAutomationConfig(currentAc). SetMongoDBVersion(mdb.Spec.Version). SetFCV(mdb.GetFCV()). @@ -761,12 +776,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific dataVolume := statefulset.CreateVolumeMount(dataVolumeName, "/data") - replicas := mdb.Spec.Members - if scale.IsScalingDown(mdb) { - replicas = mdb.ReplicasThisReconciliation() - } - - zap.S().Debugw("BuildingStatefulSet", "replicas", replicas) + zap.S().Debugw("BuildingStatefulSet", "replicas", mdb.StatefulSetReplicasThisReconciliation()) return statefulset.Apply( statefulset.WithName(mdb.Name), @@ -775,7 +785,7 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific statefulset.WithLabels(labels), statefulset.WithMatchLabels(labels), statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), - statefulset.WithReplicas(replicas), + statefulset.WithReplicas(mdb.StatefulSetReplicasThisReconciliation()), statefulset.WithUpdateStrategyType(getUpdateStrategyType(mdb)), statefulset.WithVolumeClaim(dataVolumeName, defaultPvc()), statefulset.WithPodSpecTemplate( @@ -814,7 +824,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(30), + probes.WithFailureThreshold(15), probes.WithInitialDelaySeconds(5), ) } diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index bfa373045..4539a272a 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -412,7 +412,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 5, mdb.Status.Members) + assert.Equal(t, 5, mdb.Status.CurrentReplicaSetMembers) // scale members from five to three mdb.Spec.Members = 3 @@ -424,13 +424,14 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin res, err = r.Reconcile(reconcile.Request{NamespacedName: mdb.NamespacedName()}) + makeStatefulSetReady(t, mgr.GetClient(), mdb) assert.NoError(t, err) err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.Members) + assert.Equal(t, 4, mdb.Status.CurrentReplicaSetMembers) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -441,7 +442,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers) } func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { @@ -454,7 +455,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. 
err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers) // scale members from three to five mdb.Spec.Members = 5 @@ -472,7 +473,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.Members) + assert.Equal(t, 4, mdb.Status.CurrentReplicaSetMembers) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -484,7 +485,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. assert.NoError(t, err) assert.Equal(t, false, res.Requeue) - assert.Equal(t, 5, mdb.Status.Members) + assert.Equal(t, 5, mdb.Status.CurrentReplicaSetMembers) } func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) { @@ -521,7 +522,7 @@ func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 3, mdb.Status.Members) + assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers) } func TestOpenshift_Configuration(t *testing.T) { @@ -635,8 +636,8 @@ func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { sts := appsv1.StatefulSet{} err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) assert.NoError(t, err) - sts.Status.ReadyReplicas = int32(mdb.ReplicasThisReconciliation()) - sts.Status.UpdatedReplicas = int32(mdb.ReplicasThisReconciliation()) + sts.Status.ReadyReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) + sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) err = c.Update(context.TODO(), &sts) assert.NoError(t, err) } diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index fa49233a4..2d57da761 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -128,9 +128,10 @@ func BasicFunctionality(mdb *mdbv1.MongoDB) func(*testing.T) { }))) t.Run("Test Status Was Updated", Status(mdb, mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - Members: mdb.Spec.Members, + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentReplicaSetMembers: mdb.Spec.Members, + CurrentStatefulSetReplicas: mdb.Spec.Members, })) } } diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index b9a61175b..11bade19a 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -72,9 +72,10 @@ func TestReplicaSet(t *testing.T) { t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 2)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, mdbv1.MongoDBStatus{ - MongoURI: mdb0.MongoURI(), - Phase: mdbv1.Running, - Members: 5, + MongoURI: mdb0.MongoURI(), + Phase: mdbv1.Running, + CurrentReplicaSetMembers: 5, + CurrentStatefulSetReplicas: 5, })) t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb0)) @@ -82,9 +83,10 @@ func TestReplicaSet(t *testing.T) { t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, 
mdbv1.MongoDBStatus{
-				MongoURI: mdb0.MongoURI(),
-				Phase:    mdbv1.Running,
-				Members:  3,
+				MongoURI:                   mdb0.MongoURI(),
+				Phase:                      mdbv1.Running,
+				CurrentReplicaSetMembers:   5,
+				CurrentStatefulSetReplicas: 5,
 			}))
 	})
 
diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
index 5174e7184..a0ab8d855 100644
--- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
+++ b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
@@ -52,9 +52,10 @@ func TestReplicaSetReadinessProbeScaling(t *testing.T) {
 		t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb))
 		t.Run("Test Status Was Updated", mongodbtests.Status(&mdb,
 			mdbv1.MongoDBStatus{
-				MongoURI: mdb.MongoURI(),
-				Phase:    mdbv1.Running,
-				Members:  3,
+				MongoURI:                   mdb.MongoURI(),
+				Phase:                      mdbv1.Running,
+				CurrentReplicaSetMembers:   3,
+				CurrentStatefulSetReplicas: 3,
 			}))
 	})
 
diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go
index 30d079502..aa4fd3083 100644
--- a/test/e2e/replica_set_scale/replica_set_scaling_test.go
+++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go
@@ -49,9 +49,10 @@ func TestReplicaSetScale(t *testing.T) {
 		t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3))
 		t.Run("Test Status Was Updated", mongodbtests.Status(&mdb,
 			mdbv1.MongoDBStatus{
-				MongoURI: mdb.MongoURI(),
-				Phase:    mdbv1.Running,
-				Members:  5,
+				MongoURI:                   mdb.MongoURI(),
+				Phase:                      mdbv1.Running,
+				CurrentReplicaSetMembers:   5,
+				CurrentStatefulSetReplicas: 5,
 			}))
 		t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3))
 		t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb))
@@ -59,9 +60,10 @@ func TestReplicaSetScale(t *testing.T) {
 		t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5))
 		t.Run("Test Status Was Updated", mongodbtests.Status(&mdb,
 			mdbv1.MongoDBStatus{
-				MongoURI: mdb.MongoURI(),
-				Phase:    mdbv1.Running,
-				Members:  3,
+				MongoURI:                   mdb.MongoURI(),
+				Phase:                      mdbv1.Running,
+				CurrentReplicaSetMembers:   3,
+				CurrentStatefulSetReplicas: 3,
 			}))
 	})
 }
From 31f74a03e5e53d2b0092baecd03b69b9371f4e23 Mon Sep 17 00:00:00 2001
From: chatton
Date: Wed, 16 Sep 2020 16:27:23 +0100
Subject: [PATCH 28/39] added CRD updates

---
 deploy/crds/mongodb.com_mongodb_crd.yaml | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/deploy/crds/mongodb.com_mongodb_crd.yaml b/deploy/crds/mongodb.com_mongodb_crd.yaml
index 402da921d..f2edee048 100644
--- a/deploy/crds/mongodb.com_mongodb_crd.yaml
+++ b/deploy/crds/mongodb.com_mongodb_crd.yaml
@@ -175,7 +175,9 @@ spec:
   status:
     description: MongoDBStatus defines the observed state of MongoDB
     properties:
-      members:
+      currentReplicaSetMembers:
+        type: integer
+      currentStatefulSetReplicas:
         type: integer
       message:
         type: string
@@ -183,10 +185,6 @@
         type: string
       phase:
         type: string
-    required:
-    - members
-    - mongoUri
-    - phase
     type: object
   type: object
 version: v1
From 62e6693eeef85cef756fc5587c8ef2366c159d24 Mon Sep 17 00:00:00 2001
From: chatton
Date: Thu, 17 Sep 2020 10:25:04 +0100
Subject: [PATCH 29/39] test with readiness probe failure threshold set to a
 high number

---
 .../mongodb/replica_set_controller.go         | 83 +++++++++++--------
 1 file changed, 48 insertions(+), 35 deletions(-)

diff --git 
a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 992427b50..9c4c63872 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -200,24 +200,17 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } - // if we're scaling down, we need to wait until the + // if we're scaling down, we need to wait until the StatefulSet is at the + // desired number of replicas. Scaling down has to happen one member at a time if scale.IsScalingDown(mdb) { - isAtDesiredReplicaCount, err := r.isExpectedNumberOfStatefulSetReplicasReady(mdb) + result, err := checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(r.client, r.client.Status(), mdb) + if err != nil { - r.log.Errorf("error: %s", err) - return reconcile.Result{}, err + return result, err } - if !isAtDesiredReplicaCount { - r.log.Infow("not yet at the desired number of replicas, requeuing reconciliation", - "currentReplicas", mdb.CurrentReplicas(), "desiredReplicas", mdb.DesiredReplicas()) - - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Info, fmt.Sprintf("Replica Set is scaling down, currentMembers=%d, desiredMembers=%d", - mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withPendingPhase(10), - ) + if result.Requeue { + return result, nil } } @@ -231,8 +224,8 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) if err != nil { - r.log.Errorf("error updating status: %s", err) - return reconcile.Result{}, err + r.log.Errorf("Error updating status: %s", err) + return failedResult() } r.log.Debug("Validating TLS Config") @@ -280,15 +273,11 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } - currentSts := appsv1.StatefulSet{} - if err := r.client.Get(context.TODO(), mdb.NamespacedName(), ¤tSts); err != nil { - errMsg := err.Error() - if !apiErrors.IsNotFound(err) { - errMsg = fmt.Sprintf("error getting StatefulSet: %s", err) - } + currentSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + if err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). - withMessage(Error, errMsg). + withMessage(Error, fmt.Sprintf("Error getting StatefulSet: %s", err)). withFailedPhase(), ) } @@ -381,6 +370,41 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R return res, err } +// checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig ensures that the expected number of StatefulSet +// replicas are ready. When a member has its process removed from the Automation Config, the pod will eventually +// become unready. We use this information to determine if we are safe to continue the reconciliation process. +func checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(stsGetter statefulset.Getter, statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) (reconcile.Result, error) { + isAtDesiredReplicaCount, err := areExpectedNumberOfStatefulSetReplicasReady(stsGetter, mdb) + if err != nil { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Error, fmt.Sprintf("Error determining state of StatefulSet: %s", err)). + withFailedPhase(), + ) + } + + if !isAtDesiredReplicaCount { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Info, fmt.Sprintf("Not yet at the desired number of replicas, currentMembers=%d, desiredMembers=%d", + mdb.CurrentReplicas(), mdb.DesiredReplicas())). 
+ withPendingPhase(10), + ) + } + return okResult() +} + +// areExpectedNumberOfStatefulSetReplicasReady checks to see if the StatefulSet corresponding +// to the given MongoDB resource has the expected number of ready replicas. +func areExpectedNumberOfStatefulSetReplicasReady(stsGetter statefulset.Getter, mdb mdbv1.MongoDB) (bool, error) { + sts, err := stsGetter.GetStatefulSet(mdb.NamespacedName()) + if err != nil { + return false, err + } + desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation()) + return sts.Status.ReadyReplicas == desiredReadyReplicas, nil +} + // resetStatefulSetUpdateStrategy ensures the stateful set is configured back to using RollingUpdateStatefulSetStrategyType // and does not keep using OnDelete func (r *ReplicaSetReconciler) resetStatefulSetUpdateStrategy(mdb mdbv1.MongoDB) error { @@ -435,17 +459,6 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta return areEqual && isReady, nil } -func (r *ReplicaSetReconciler) isExpectedNumberOfStatefulSetReplicasReady(mdb mdbv1.MongoDB) (bool, error) { - sts, err := r.client.GetStatefulSet(mdb.NamespacedName()) - if err != nil { - return false, err - } - desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation()) - r.log.Infow("isExpectedNumberOfStatefulSetReplicasReady", "desiredReadyReplicas", desiredReadyReplicas) - - return sts.Status.ReadyReplicas == desiredReadyReplicas, nil -} - func (r *ReplicaSetReconciler) ensureService(mdb mdbv1.MongoDB) error { svc := buildService(mdb) err := r.client.Create(context.TODO(), &svc) @@ -824,7 +837,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(15), + probes.WithFailureThreshold(240), probes.WithInitialDelaySeconds(5), ) } From 13b0031f000dae9215b10332a84e7f917f6713bd Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 17 Sep 2020 13:47:38 +0100 Subject: [PATCH 30/39] increase timeouts for scale down drastically --- .../mongodb/mongodb_status_options.go | 34 +++++-------------- .../mongodb/replica_set_controller.go | 13 +++---- .../reconciliationresults.go | 23 +++++++++++++ test/e2e/mongodbtests/mongodbtests.go | 15 +++++++- .../replica_set_multiple_test.go | 2 +- .../replica_set_scaling_test.go | 2 +- 6 files changed, 53 insertions(+), 36 deletions(-) create mode 100644 pkg/util/resconciliationresult/reconciliationresults.go diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index 90ee13ef9..d89ad761f 100644 --- a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -1,9 +1,8 @@ package mongodb import ( - "time" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/resconciliationresult" "go.uber.org/zap" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -57,7 +56,7 @@ func (m mongoUriOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (m mongoUriOption) GetResult() (reconcile.Result, error) { - return okResult() + return resconciliationresult.OK() } func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { @@ -95,7 +94,7 @@ func (m messageOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (m messageOption) GetResult() (reconcile.Result, error) { - return okResult() + return resconciliationresult.OK() } func (o 
*optionBuilder) withAutomationConfigMembers(members int) *optionBuilder { @@ -145,15 +144,15 @@ func (p phaseOption) ApplyOption(mdb *mdbv1.MongoDB) { func (p phaseOption) GetResult() (reconcile.Result, error) { if p.phase == mdbv1.Running { - return okResult() + return resconciliationresult.OK() } if p.phase == mdbv1.Pending { - return retryResult(p.retryAfter) + return resconciliationresult.Retry(p.retryAfter) } if p.phase == mdbv1.Failed { - return failedResult() + return resconciliationresult.Failed() } - return okResult() + return resconciliationresult.OK() } type automationConfigReplicasOption struct { @@ -165,7 +164,7 @@ func (a automationConfigReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (a automationConfigReplicasOption) GetResult() (reconcile.Result, error) { - return okResult() + return resconciliationresult.OK() } type statefulSetReplicasOption struct { @@ -177,20 +176,5 @@ func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { - return okResult() -} - -// helper functions which return reconciliation results which should be -// returned from the main reconciliation loop - -func okResult() (reconcile.Result, error) { - return reconcile.Result{}, nil -} - -func retryResult(after int) (reconcile.Result, error) { - return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil -} - -func failedResult() (reconcile.Result, error) { - return retryResult(0) + return resconciliationresult.OK() } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 9c4c63872..df762b2f7 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -9,6 +9,8 @@ import ( "os" "strings" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/resconciliationresult" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" @@ -204,14 +206,9 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R // desired number of replicas. 
Scaling down has to happen one member at a time if scale.IsScalingDown(mdb) { result, err := checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(r.client, r.client.Status(), mdb) - - if err != nil { + if resconciliationresult.ShouldRequeue(result, err) { return result, err } - - if result.Requeue { - return result, nil - } } // at this stage we know we have successfully updated the automation config with the correct number of @@ -225,7 +222,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err != nil { r.log.Errorf("Error updating status: %s", err) - return failedResult() + return resconciliationresult.Failed() } r.log.Debug("Validating TLS Config") @@ -391,7 +388,7 @@ func checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(stsGetter s withPendingPhase(10), ) } - return okResult() + return resconciliationresult.OK() } // areExpectedNumberOfStatefulSetReplicasReady checks to see if the StatefulSet corresponding diff --git a/pkg/util/resconciliationresult/reconciliationresults.go b/pkg/util/resconciliationresult/reconciliationresults.go new file mode 100644 index 000000000..74a474d96 --- /dev/null +++ b/pkg/util/resconciliationresult/reconciliationresults.go @@ -0,0 +1,23 @@ +package resconciliationresult + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func ShouldRequeue(result reconcile.Result, err error) bool { + return err != nil || result.Requeue || result.RequeueAfter > 0 +} + +func OK() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +func Retry(after int) (reconcile.Result, error) { + return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil +} + +func Failed() (reconcile.Result, error) { + return Retry(0) +} diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index 2d57da761..67190002e 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -29,8 +29,21 @@ import ( // StatefulSetIsReady ensures that the underlying stateful set // reaches the running state func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { + return statefulSetIsReady(mdb, time.Second*15, time.Minute*5) +} + +// StatefulSetIsReadyAfterScaleDown ensures that a replica set is scaled down correctly +// note: scaling down takes considerably longer than scaling up due the readiness probe +// failure threshold being high +func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDB) func(t *testing.T) { + return statefulSetIsReady(mdb, time.Second*15, time.Minute*20) +} + +// StatefulSetIsReady ensures that the underlying stateful set +// reaches the running state +func statefulSetIsReady(mdb *mdbv1.MongoDB, interval time.Duration, timeout time.Duration) func(t *testing.T) { return func(t *testing.T) { - err := e2eutil.WaitForStatefulSetToBeReady(t, mdb, time.Second*15, time.Minute*5) + err := e2eutil.WaitForStatefulSetToBeReady(t, mdb, interval, timeout) if err != nil { t.Fatal(err) } diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index 11bade19a..bb67e0547 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -78,7 +78,7 @@ func TestReplicaSet(t *testing.T) { CurrentStatefulSetReplicas: 5, })) t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) - t.Run("Stateful Set Scaled Down Correctly", 
mongodbtests.StatefulSetIsReady(&mdb0)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb0)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index aa4fd3083..196f80496 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -55,7 +55,7 @@ func TestReplicaSetScale(t *testing.T) { CurrentStatefulSetReplicas: 5, })) t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, From 82521805229be4959821d3c3100ceb12a8b20f2a Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 18 Sep 2020 09:06:14 +0100 Subject: [PATCH 31/39] tweak readiness probe values, added test just for scale down --- .evergreen.yml | 8 +++ .../mongodb/replica_set_controller.go | 2 +- test/e2e/mongodbtests/mongodbtests.go | 2 +- .../replica_set_multiple_test.go | 28 +++++---- .../replica_set_scaling_test.go | 28 +++++---- .../replica_set_scale_down_test.go | 60 +++++++++++++++++++ 6 files changed, 100 insertions(+), 28 deletions(-) create mode 100644 test/e2e/replica_set_scale_down/replica_set_scale_down_test.go diff --git a/.evergreen.yml b/.evergreen.yml index b4a558a78..31de8ea35 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -167,6 +167,7 @@ task_groups: - e2e_test_replica_set - e2e_test_replica_set_readiness_probe - e2e_test_replica_set_scale + - e2e_test_replica_set_scale_down - e2e_test_replica_set_change_version - e2e_test_feature_compatibility_version - e2e_test_feature_compatibility_version_upgrade @@ -279,6 +280,13 @@ tasks: vars: test: replica_set_scale + - name: e2e_test_replica_set_scale_down + exec_timeout_secs: 3000 + commands: + - func: run_e2e_test + vars: + test: replica_set_scale_down + - name: e2e_test_replica_set_change_version commands: - func: run_e2e_test diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index df762b2f7..d0208a067 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -834,7 +834,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(240), + probes.WithFailureThreshold(60), probes.WithInitialDelaySeconds(5), ) } diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index 67190002e..9262c0ada 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -36,7 +36,7 @@ func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { // note: scaling down takes considerably longer than scaling up due the readiness 
probe // failure threshold being high func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDB) func(t *testing.T) { - return statefulSetIsReady(mdb, time.Second*15, time.Minute*20) + return statefulSetIsReady(mdb, time.Second*60, time.Minute*45) } // StatefulSetIsReady ensures that the underlying stateful set diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index bb67e0547..cb4327837 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -17,9 +17,9 @@ func TestMain(m *testing.M) { f.MainEntry(m) } -// TestReplicaSet creates two MongoDB resources that are handled by the Operator at the +// TestReplicaSetMultiple creates two MongoDB resources that are handled by the Operator at the // same time. One of them is scaled to 5 and then back to 3 -func TestReplicaSet(t *testing.T) { +func TestReplicaSetMultiple(t *testing.T) { ctx, shouldCleanup := setup.InitTest(t) @@ -77,17 +77,19 @@ func TestReplicaSet(t *testing.T) { CurrentReplicaSetMembers: 5, CurrentStatefulSetReplicas: 5, })) - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb0)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, - mdbv1.MongoDBStatus{ - MongoURI: mdb0.MongoURI(), - Phase: mdbv1.Running, - CurrentReplicaSetMembers: 5, - CurrentStatefulSetReplicas: 5, - })) + + // TODO: Currently the scale down process takes too long to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb0)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb0.MongoURI(), + // Phase: mdbv1.Running, + // CurrentReplicaSetMembers: 5, + // CurrentStatefulSetReplicas: 5, + // })) }) diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index 196f80496..fb94bc407 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -1,4 +1,4 @@ -package replica_set_readiness_probe +package replica_set_scale_up import ( "testing" @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { f.MainEntry(m) } -func TestReplicaSetScale(t *testing.T) { +func TestReplicaSetScaleUp(t *testing.T) { ctx, shouldCleanup := setup.InitTest(t) @@ -54,16 +54,18 @@ func TestReplicaSetScale(t *testing.T) { CurrentReplicaSetMembers: 5, CurrentStatefulSetReplicas: 5, })) - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 
5)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - CurrentReplicaSetMembers: 3, - CurrentStatefulSetReplicas: 3, - })) + + // TODO: Currently the scale down process takes too long to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb.MongoURI(), + // Phase: mdbv1.Running, + // CurrentReplicaSetMembers: 3, + // CurrentStatefulSetReplicas: 3, + // })) }) } diff --git a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go new file mode 100644 index 000000000..6b32cb234 --- /dev/null +++ b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go @@ -0,0 +1,60 @@ +package replica_set_scale_down + +import ( + "testing" + "time" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + f "github.com/operator-framework/operator-sdk/pkg/test" +) + +func TestMain(m *testing.M) { + f.MainEntry(m) +} + +func TestReplicaSetScaleDown(t *testing.T) { + ctx, shouldCleanup := setup.InitTest(t) + + if shouldCleanup { + defer ctx.Cleanup() + } + mdb, user := e2eutil.NewTestMongoDB("replica-set-scale-down") + + _, err := setup.GeneratePasswordForUser(user, ctx) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + + t.Run("MongoDB is reachable", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 1)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, + mdbv1.MongoDBStatus{ + MongoURI: mdb.MongoURI(), + Phase: mdbv1.Running, + CurrentReplicaSetMembers: 1, + CurrentStatefulSetReplicas: 1, + })) + }) +} From ae19bde2d1023e291b8d6c030d35f3df051c8889 Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 18 Sep 2020 10:36:31 +0100 Subject: [PATCH 32/39] increase timeout for e2e test --- cmd/testrunner/main.go | 3 ++- 
test/e2e/replica_set_multiple/replica_set_multiple_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go index 192a2c707..e5bef059c 100644 --- a/cmd/testrunner/main.go +++ b/cmd/testrunner/main.go @@ -271,7 +271,8 @@ func withTest(test string) func(obj runtime.Object) { "--kubeconfig", "/etc/config/kubeconfig", "--go-test-flags", - "-timeout=40m", + // TODO: allow this to be configurable per test, this is only large due to scale down test + "-timeout=60m", } } } diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index cb4327837..75f92451c 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -69,7 +69,7 @@ func TestReplicaSetMultiple(t *testing.T) { t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb0, 5)) t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetIsReady(&mdb0)) t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 2)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, mdbv1.MongoDBStatus{ MongoURI: mdb0.MongoURI(), From 935e9f99b1493fbdb652849838d6fdb7900f3dac Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 18 Sep 2020 17:21:59 +0100 Subject: [PATCH 33/39] wip --- .evergreen.yml | 2 +- pkg/controller/mongodb/replica_set_controller.go | 4 ++-- test/e2e/e2eutil.go | 11 ++++++++++- test/e2e/mongodbtests/mongodbtests.go | 8 +++++++- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index 31de8ea35..99bda60b7 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -281,7 +281,7 @@ tasks: test: replica_set_scale - name: e2e_test_replica_set_scale_down - exec_timeout_secs: 3000 + exec_timeout_secs: 3600 commands: - func: run_e2e_test vars: diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index d0208a067..016a22b82 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -435,7 +435,7 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) + isReady := int32(mdb.StatefulSetReplicasThisReconciliation()) == existingStatefulSet.Status.ReadyReplicas if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ @@ -834,7 +834,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(60), + probes.WithFailureThreshold(15), probes.WithInitialDelaySeconds(5), ) } diff --git a/test/e2e/e2eutil.go b/test/e2e/e2eutil.go index ec8a55890..6325aaf54 100644 --- a/test/e2e/e2eutil.go +++ b/test/e2e/e2eutil.go @@ -91,6 +91,14 @@ func 
WaitForStatefulSetToBeReady(t *testing.T, mdb *mdbv1.MongoDB, retryInterval }) } +// WaitForStatefulSetToBeReadyAfterScaleDown waits for just the ready replicas to be correct +// and does not account for the updated replicas +func WaitForStatefulSetToBeReadyAfterScaleDown(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration) error { + return waitForStatefulSetCondition(t, mdb, retryInterval, timeout, func(sts appsv1.StatefulSet) bool { + return int32(mdb.Spec.Members) == sts.Status.ReadyReplicas + }) +} + func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration, condition func(set appsv1.StatefulSet) bool) error { _, err := WaitForStatefulSetToExist(mdb.Name, retryInterval, timeout) if err != nil { @@ -103,7 +111,8 @@ func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDB, retryInterval if err != nil { return false, err } - t.Logf("Waiting for %s to have %d replicas. Current ready replicas: %d\n", mdb.Name, mdb.Spec.Members, sts.Status.ReadyReplicas) + t.Logf("Waiting for %s to have %d replicas. Current ready replicas: %d, Current updated replicas: %d\n", + mdb.Name, mdb.Spec.Members, sts.Status.ReadyReplicas, sts.Status.UpdatedReplicas) ready := condition(sts) return ready, nil }) diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index 9262c0ada..5e87c7081 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -36,7 +36,13 @@ func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { // note: scaling down takes considerably longer than scaling up due the readiness probe // failure threshold being high func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDB) func(t *testing.T) { - return statefulSetIsReady(mdb, time.Second*60, time.Minute*45) + return func(t *testing.T) { + err := e2eutil.WaitForStatefulSetToBeReadyAfterScaleDown(t, mdb, time.Second*60, time.Minute*45) + if err != nil { + t.Fatal(err) + } + t.Logf("StatefulSet %s/%s is ready!", mdb.Namespace, mdb.Name) + } } // StatefulSetIsReady ensures that the underlying stateful set From c36a4a08f556b456260b85b2ee8c970aaf8a5466 Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 18 Sep 2020 17:22:26 +0100 Subject: [PATCH 34/39] set failureThreshold to 60 --- pkg/controller/mongodb/replica_set_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 016a22b82..1d5c9487c 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -834,7 +834,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(15), + probes.WithFailureThreshold(60), probes.WithInitialDelaySeconds(5), ) } From 95c7c1a37b3e79ecf35fa67951b385c4935fa36b Mon Sep 17 00:00:00 2001 From: chatton Date: Fri, 18 Sep 2020 17:35:20 +0100 Subject: [PATCH 35/39] wip --- pkg/controller/mongodb/replica_set_controller.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 1d5c9487c..a8b035f23 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -435,7 +435,13 @@ func (r 
*ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := int32(mdb.StatefulSetReplicasThisReconciliation()) == existingStatefulSet.Status.ReadyReplicas + isReady := false + if scale.IsScalingDown(mdb) || existingStatefulSet.Status.ReadyReplicas == 1 { + isReady = int32(mdb.StatefulSetReplicasThisReconciliation()) == existingStatefulSet.Status.ReadyReplicas + } else { + isReady = statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) + } + if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") annotations := map[string]string{ From 969b2ba7e64de0dd00218c80e680467076221630 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 21 Sep 2020 09:01:27 +0100 Subject: [PATCH 36/39] Added unit tests and cleaned up code --- deploy/operator.yaml | 2 +- .../mongodb/mongodb_status_options.go | 18 ++-- .../mongodb/replica_set_controller.go | 82 ++++------------ .../mongodb/replicaset_controller_test.go | 12 ++- .../mongodb/replicaset_scaledown.go | 58 ++++++++++++ .../mongodb/replicaset_scaledown_test.go | 93 +++++++++++++++++++ .../reconciliationresults.go | 2 +- 7 files changed, 192 insertions(+), 75 deletions(-) create mode 100644 pkg/controller/mongodb/replicaset_scaledown.go create mode 100644 pkg/controller/mongodb/replicaset_scaledown_test.go rename pkg/util/{resconciliationresult => result}/reconciliationresults.go (94%) diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 6392551e9..56f163405 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -31,7 +31,7 @@ spec: - name: OPERATOR_NAME value: "mongodb-kubernetes-operator" - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage MongoDB deployments - value: quay.io/mongodb/mongodb-agent-dev:scaletest + value: quay.io/mongodb/mongodb-agent-dev:scaledown # value: quay.io/mongodb/mongodb-agent:10.19.0.6562-1 - name: VERSION_UPGRADE_HOOK_IMAGE value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2 diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index d89ad761f..5298f7715 100644 --- a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -2,7 +2,7 @@ package mongodb import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/resconciliationresult" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" "go.uber.org/zap" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" @@ -56,7 +56,7 @@ func (m mongoUriOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (m mongoUriOption) GetResult() (reconcile.Result, error) { - return resconciliationresult.OK() + return result.OK() } func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { @@ -94,7 +94,7 @@ func (m messageOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (m messageOption) GetResult() (reconcile.Result, error) { - return resconciliationresult.OK() + return result.OK() } func (o *optionBuilder) withAutomationConfigMembers(members int) *optionBuilder { @@ -144,15 +144,15 @@ func (p phaseOption) ApplyOption(mdb *mdbv1.MongoDB) { func (p phaseOption) GetResult() (reconcile.Result, error) 
{ if p.phase == mdbv1.Running { - return resconciliationresult.OK() + return result.OK() } if p.phase == mdbv1.Pending { - return resconciliationresult.Retry(p.retryAfter) + return result.Retry(p.retryAfter) } if p.phase == mdbv1.Failed { - return resconciliationresult.Failed() + return result.Failed() } - return resconciliationresult.OK() + return result.OK() } type automationConfigReplicasOption struct { @@ -164,7 +164,7 @@ func (a automationConfigReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (a automationConfigReplicasOption) GetResult() (reconcile.Result, error) { - return resconciliationresult.OK() + return result.OK() } type statefulSetReplicasOption struct { @@ -176,5 +176,5 @@ func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { } func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { - return resconciliationresult.OK() + return result.OK() } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index a8b035f23..7363e2ff5 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/resconciliationresult" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" @@ -171,18 +171,15 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue - return reconcile.Result{}, nil + return result.OK() } r.log.Errorf("Error reconciling MongoDB resource: %s", err) // Error reading the object - requeue the request. - return reconcile.Result{}, err + return result.Failed() } r.log = zap.S().With("ReplicaSet", request.NamespacedName) - r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status, - "desiredMembers", mdb.DesiredReplicas(), - "currentMembers", mdb.CurrentReplicas(), - ) + r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status) r.log.Debug("Ensuring Automation Config for deployment") if err := r.ensureAutomationConfig(mdb); err != nil { @@ -205,24 +202,25 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R // if we're scaling down, we need to wait until the StatefulSet is at the // desired number of replicas. Scaling down has to happen one member at a time if scale.IsScalingDown(mdb) { - result, err := checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(r.client, r.client.Status(), mdb) - if resconciliationresult.ShouldRequeue(result, err) { - return result, err + res, err := checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(r.client, r.client.Status(), mdb) + + if err != nil { + r.log.Errorf("Error checking if StatefulSet members have been removed from the automation config: %s", err) + return result.Failed() + } + + if result.ShouldRequeue(res, err) { + r.log.Debugf("The expected number of Stateful Set members for scale down are not yet ready, requeuing reconciliation") + return result.Retry(10) } } // at this stage we know we have successfully updated the automation config with the correct number of // members and the stateful set has the expected number of ready replicas. 
We can update our status // so we calculate these fields correctly going forward - _, err = status.Update(r.client.Status(), &mdb, - statusOptions(). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). - withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()), - ) - - if err != nil { - r.log.Errorf("Error updating status: %s", err) - return resconciliationresult.Failed() + if err := updateScalingStatus(r.client.Status(), mdb); err != nil { + r.log.Errorf("Failed updating the status of the MongoDB resource: %s", err) + return result.Failed() } r.log.Debug("Validating TLS Config") @@ -367,41 +365,6 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R return res, err } -// checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig ensures that the expected number of StatefulSet -// replicas are ready. When a member has its process removed from the Automation Config, the pod will eventually -// become unready. We use this information to determine if we are safe to continue the reconciliation process. -func checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(stsGetter statefulset.Getter, statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) (reconcile.Result, error) { - isAtDesiredReplicaCount, err := areExpectedNumberOfStatefulSetReplicasReady(stsGetter, mdb) - if err != nil { - return status.Update(statusWriter, &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error determining state of StatefulSet: %s", err)). - withFailedPhase(), - ) - } - - if !isAtDesiredReplicaCount { - return status.Update(statusWriter, &mdb, - statusOptions(). - withMessage(Info, fmt.Sprintf("Not yet at the desired number of replicas, currentMembers=%d, desiredMembers=%d", - mdb.CurrentReplicas(), mdb.DesiredReplicas())). - withPendingPhase(10), - ) - } - return resconciliationresult.OK() -} - -// areExpectedNumberOfStatefulSetReplicasReady checks to see if the StatefulSet corresponding -// to the given MongoDB resource has the expected number of ready replicas. 
-func areExpectedNumberOfStatefulSetReplicasReady(stsGetter statefulset.Getter, mdb mdbv1.MongoDB) (bool, error) { - sts, err := stsGetter.GetStatefulSet(mdb.NamespacedName()) - if err != nil { - return false, err - } - desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation()) - return sts.Status.ReadyReplicas == desiredReadyReplicas, nil -} - // resetStatefulSetUpdateStrategy ensures the stateful set is configured back to using RollingUpdateStatefulSetStrategyType // and does not keep using OnDelete func (r *ReplicaSetReconciler) resetStatefulSetUpdateStrategy(mdb mdbv1.MongoDB) error { @@ -435,12 +398,7 @@ func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB, existingSta //some issues with nil/empty maps not being compared correctly otherwise areEqual := bytes.Equal(stsCopyBytes, stsBytes) - isReady := false - if scale.IsScalingDown(mdb) || existingStatefulSet.Status.ReadyReplicas == 1 { - isReady = int32(mdb.StatefulSetReplicasThisReconciliation()) == existingStatefulSet.Status.ReadyReplicas - } else { - isReady = statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) - } + isReady := statefulset.IsReady(*existingStatefulSet, mdb.StatefulSetReplicasThisReconciliation()) if existingStatefulSet.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType && !isReady { r.log.Info("StatefulSet has left ready state, version upgrade in progress") @@ -792,8 +750,6 @@ func buildStatefulSetModificationFunction(mdb mdbv1.MongoDB) statefulset.Modific dataVolume := statefulset.CreateVolumeMount(dataVolumeName, "/data") - zap.S().Debugw("BuildingStatefulSet", "replicas", mdb.StatefulSetReplicasThisReconciliation()) - return statefulset.Apply( statefulset.WithName(mdb.Name), statefulset.WithNamespace(mdb.Namespace), @@ -840,7 +796,7 @@ func getDomain(service, namespace, clusterName string) string { func defaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(60), + probes.WithFailureThreshold(60), // TODO: this value needs further consideration probes.WithInitialDelaySeconds(5), ) } diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index 4d583866f..180bbd0dc 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -637,10 +637,20 @@ func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err e // makeStatefulSetReady updates the StatefulSet corresponding to the // provided MongoDB resource to mark it as ready for the case of `statefulset.IsReady` func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { + setStatefulSetReadyReplicas(t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) +} + +// makeStatefulSetUnReady updates the StatefulSet corresponding to the +// provided MongoDB resource to mark it as unready. 
+func makeStatefulSetUnReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB) { + setStatefulSetReadyReplicas(t, c, mdb, 0) +} + +func setStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDB, readyReplicas int) { sts := appsv1.StatefulSet{} err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) assert.NoError(t, err) - sts.Status.ReadyReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) + sts.Status.ReadyReplicas = int32(readyReplicas) sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) err = c.Update(context.TODO(), &sts) assert.NoError(t, err) diff --git a/pkg/controller/mongodb/replicaset_scaledown.go b/pkg/controller/mongodb/replicaset_scaledown.go new file mode 100644 index 000000000..257f28132 --- /dev/null +++ b/pkg/controller/mongodb/replicaset_scaledown.go @@ -0,0 +1,58 @@ +package mongodb + +import ( + "fmt" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// updateScalingStatus updates the status fields which are required to keep track of the current +// scaling state of the resource +func updateScalingStatus(statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) error { + _, err := status.Update(statusWriter, &mdb, + statusOptions(). + withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()), + ) + return err +} + +// checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig ensures that the expected number of StatefulSet +// replicas are ready. When a member has its process removed from the Automation Config, the pod will eventually +// become unready. We use this information to determine if we are safe to continue the reconciliation process. +func checkIfStatefulSetMembersHaveBeenRemovedFromTheAutomationConfig(stsGetter statefulset.Getter, statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) (reconcile.Result, error) { + isAtDesiredReplicaCount, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(stsGetter, mdb) + if err != nil { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Error, fmt.Sprintf("Error determining state of StatefulSet: %s", err)). + withFailedPhase(), + ) + } + + if !isAtDesiredReplicaCount { + return status.Update(statusWriter, &mdb, + statusOptions(). + withMessage(Info, fmt.Sprintf("Not yet at the desired number of replicas, currentMembers=%d, desiredMembers=%d", + mdb.CurrentReplicas(), mdb.DesiredReplicas())). + withPendingPhase(10), + ) + } + return result.OK() +} + +// hasReachedDesiredNumberOfStatefulSetReplicasReady checks to see if the StatefulSet corresponding +// to the given MongoDB resource has the expected number of ready replicas. 
+func hasReachedDesiredNumberOfStatefulSetReplicasReady(stsGetter statefulset.Getter, mdb mdbv1.MongoDB) (bool, error) {
+	sts, err := stsGetter.GetStatefulSet(mdb.NamespacedName())
+	if err != nil {
+		return false, err
+	}
+	desiredReadyReplicas := int32(mdb.StatefulSetReplicasThisReconciliation())
+	return sts.Status.ReadyReplicas == desiredReadyReplicas, nil
+}
diff --git a/pkg/controller/mongodb/replicaset_scaledown_test.go b/pkg/controller/mongodb/replicaset_scaledown_test.go
new file mode 100644
index 000000000..0e4c7a7c7
--- /dev/null
+++ b/pkg/controller/mongodb/replicaset_scaledown_test.go
@@ -0,0 +1,93 @@
+package mongodb
+
+import (
+	"context"
+	"testing"
+
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+	"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client"
+	"github.com/stretchr/testify/assert"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sClient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestUpdateScalingStatus(t *testing.T) {
+	mdb := newTestReplicaSet()
+	mgr := client.NewManager(&mdb)
+
+	assert.Equal(t, 0, mdb.Status.CurrentStatefulSetReplicas)
+	assert.Equal(t, 0, mdb.Status.CurrentReplicaSetMembers)
+
+	expectedAutomationConfigMembers := mdb.AutomationConfigMembersThisReconciliation()
+	expectedStatefulSetReplicas := mdb.StatefulSetReplicasThisReconciliation()
+
+	err := updateScalingStatus(mgr.Client, mdb)
+	assert.NoError(t, err)
+
+	err = mgr.Client.Get(context.TODO(), mdb.NamespacedName(), &mdb)
+	assert.NoError(t, err)
+
+	assert.Equal(t, expectedAutomationConfigMembers, mdb.Status.CurrentReplicaSetMembers)
+	assert.Equal(t, expectedStatefulSetReplicas, mdb.Status.CurrentStatefulSetReplicas)
+}
+
+func TestHasReachedDesiredNumberOfStatefulSetReplicasReady(t *testing.T) {
+	createStatefulSet := func(c k8sClient.Client, mdb mdbv1.MongoDB) error {
+		replicas := int32(mdb.Spec.Members)
+		return c.Create(context.TODO(), &appsv1.StatefulSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      mdb.Name,
+				Namespace: mdb.Namespace,
+			},
+			Spec: appsv1.StatefulSetSpec{
+				Replicas: &replicas,
+			},
+		})
+	}
+
+	t.Run("There is an error when the StatefulSet does not exist", func(t *testing.T) {
+		// Arrange
+		mdb := newTestReplicaSet()
+		mgr := client.NewManager(&mdb)
+
+		// Act
+		_, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb)
+
+		// Assert
+		assert.Error(t, err)
+	})
+
+	t.Run("Returns true when the StatefulSet exists and is ready", func(t *testing.T) {
+		// Arrange
+		mdb := newTestReplicaSet()
+		mgr := client.NewManager(&mdb)
+		err := createStatefulSet(mgr.Client, mdb)
+		assert.NoError(t, err)
+		makeStatefulSetReady(t, mgr.Client, mdb)
+
+		// Act
+		hasReached, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb)
+
+		// Assert
+		assert.NoError(t, err, "should be no error when the StatefulSet exists")
+		assert.True(t, hasReached, "Should be ready when the stateful set is ready")
+	})
+
+	t.Run("Returns false when the StatefulSet exists and is not ready", func(t *testing.T) {
+		// Arrange
+		mdb := newTestReplicaSet()
+		mgr := client.NewManager(&mdb)
+		err := createStatefulSet(mgr.Client, mdb)
+		assert.NoError(t, err)
+		makeStatefulSetUnReady(t, mgr.Client, mdb)
+
+		// Act
+		hasReached, err := hasReachedDesiredNumberOfStatefulSetReplicasReady(mgr.Client, mdb)
+
+		// Assert
+		assert.NoError(t, err, "should be no error when the StatefulSet exists")
+		assert.False(t, hasReached, "Should not be ready when the stateful set is not ready")
+	})
+
+}
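Note: the contract of the result helpers used throughout this patch (OK, Retry, Failed, ShouldRequeue) is small enough to pin down with a table-driven unit test. The following is a minimal sketch, assuming the pkg/util/result package as introduced above; the test file and case names are illustrative only and are not part of this patch series:

package result

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// TestShouldRequeue pins down the requeue contract: any error, an explicit
// Requeue, or a positive RequeueAfter means the reconciliation must run again.
func TestShouldRequeue(t *testing.T) {
	cases := []struct {
		name     string
		result   reconcile.Result
		err      error
		expected bool
	}{
		{"empty result does not requeue", reconcile.Result{}, nil, false},
		{"an error forces a requeue", reconcile.Result{}, errors.New("boom"), true},
		{"explicit requeue is honoured", reconcile.Result{Requeue: true}, nil, true},
		{"requeue after a delay is honoured", reconcile.Result{RequeueAfter: 10 * time.Second}, nil, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, ShouldRequeue(tc.result, tc.err))
		})
	}
}

Under this contract, Failed() (which is Retry(0), i.e. Requeue set to true) and Retry(n) both report true, while OK() reports false, which is exactly how the reconcile loop above distinguishes "done" from "come back later".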
diff --git a/pkg/util/resconciliationresult/reconciliationresults.go b/pkg/util/result/reconciliationresults.go similarity index 94% rename from pkg/util/resconciliationresult/reconciliationresults.go rename to pkg/util/result/reconciliationresults.go index 74a474d96..2bfbc8b71 100644 --- a/pkg/util/resconciliationresult/reconciliationresults.go +++ b/pkg/util/result/reconciliationresults.go @@ -1,4 +1,4 @@ -package resconciliationresult +package result import ( "time" From e5b1ad2cb6027b31d0fad4de7e19763494b27422 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 21 Sep 2020 09:20:40 +0100 Subject: [PATCH 37/39] removed dead code --- pkg/apis/mongodb/v1/mongodb_types.go | 52 ++++++++++--------- .../mongodb/replica_set_controller.go | 21 +------- 2 files changed, 28 insertions(+), 45 deletions(-) diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go index de82e37ad..74f40c660 100644 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ b/pkg/apis/mongodb/v1/mongodb_types.go @@ -223,32 +223,10 @@ type MongoDB struct { Status MongoDBStatus `json:"status,omitempty"` } -func (m MongoDB) DesiredReplicas() int { - return m.Spec.Members -} - -func (m MongoDB) CurrentReplicas() int { - return m.Status.CurrentStatefulSetReplicas -} - -func (m *MongoDB) StatefulSetReplicasThisReconciliation() int { - return scale.ReplicasThisReconciliation(m) -} - -type intScaler struct { - current, desired int -} - -func (a intScaler) DesiredReplicas() int { - return a.desired -} - -func (a intScaler) CurrentReplicas() int { - return a.current -} - func (m MongoDB) AutomationConfigMembersThisReconciliation() int { - return scale.ReplicasThisReconciliation(intScaler{ + // determine the correct number of automation config replica set members + // based on our desired number, and our current number + return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ desired: m.Spec.Members, current: m.Status.CurrentReplicaSetMembers, }) @@ -320,6 +298,30 @@ func (m MongoDB) GetFCV() string { return strings.Join(parts[:minorIndex+1], ".") } +func (m MongoDB) DesiredReplicas() int { + return m.Spec.Members +} + +func (m MongoDB) CurrentReplicas() int { + return m.Status.CurrentStatefulSetReplicas +} + +func (m *MongoDB) StatefulSetReplicasThisReconciliation() int { + return scale.ReplicasThisReconciliation(m) +} + +type automationConfigReplicasScaler struct { + current, desired int +} + +func (a automationConfigReplicasScaler) DesiredReplicas() int { + return a.desired +} + +func (a automationConfigReplicasScaler) CurrentReplicas() int { + return a.current +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MongoDBList contains a list of MongoDB diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 7363e2ff5..7116b0af9 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -240,25 +240,6 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R ) } - mdbSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) - if k8sClient.IgnoreNotFound(err) != nil { - return reconcile.Result{}, err - } - - readyToUpdateSts := true - if mdbSts.Status.Replicas > int32(mdb.StatefulSetReplicasThisReconciliation()) { - readyToUpdateSts = mdbSts.Status.ReadyReplicas == int32(mdb.StatefulSetReplicasThisReconciliation()) - r.log.Debugw("ReplicaSet Scaling", "readyReplicas", mdbSts.Status.Replicas, "replicasThisReconciliation", 
mdb.StatefulSetReplicasThisReconciliation())
-	}
-
-	if !readyToUpdateSts {
-		return status.Update(r.client.Status(), &mdb,
-			statusOptions().
-				withMessage(Debug, fmt.Sprintf("Not ready to update StatefulSet: %s", mdb.NamespacedName())).
-				withPendingPhase(5),
-		)
-	}
-
 	r.log.Debug("Creating/Updating StatefulSet")
 	if err := r.createOrUpdateStatefulSet(mdb); err != nil {
 		return status.Update(r.client.Status(), &mdb,
@@ -796,7 +777,7 @@ func getDomain(service, namespace, clusterName string) string {
 func defaultReadiness() probes.Modification {
 	return probes.Apply(
 		probes.WithExecCommand([]string{readinessProbePath}),
-		probes.WithFailureThreshold(60), // TODO: this value needs further consideration
+		probes.WithFailureThreshold(15), // TODO: this value needs further consideration
 		probes.WithInitialDelaySeconds(5),
 	)
 }

From a9b892c8974ab2b93ca0fdea87fc39082d6de9be Mon Sep 17 00:00:00 2001
From: chatton
Date: Mon, 21 Sep 2020 09:47:05 +0100
Subject: [PATCH 38/39] updated readiness failure threshold

---
 pkg/controller/mongodb/replica_set_controller.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go
index 7116b0af9..8865170a7 100644
--- a/pkg/controller/mongodb/replica_set_controller.go
+++ b/pkg/controller/mongodb/replica_set_controller.go
@@ -777,7 +777,7 @@ func getDomain(service, namespace, clusterName string) string {
 func defaultReadiness() probes.Modification {
 	return probes.Apply(
 		probes.WithExecCommand([]string{readinessProbePath}),
-		probes.WithFailureThreshold(15), // TODO: this value needs further consideration
+		probes.WithFailureThreshold(60), // TODO: this value needs further consideration
 		probes.WithInitialDelaySeconds(5),
 	)
 }

From 6b0417699bf3c527c15e6c0b93f67bf008bd0db6 Mon Sep 17 00:00:00 2001
From: chatton
Date: Thu, 24 Sep 2020 07:42:16 +0100
Subject: [PATCH 39/39] updated after PR feedback

---
 deploy/crds/mongodb.com_mongodb_crd.yaml          |  4 ++--
 pkg/apis/mongodb/v1/mongodb_types.go              |  4 ++--
 pkg/controller/mongodb/mongodb_status_options.go  | 16 ++++++++--------
 pkg/controller/mongodb/replica_set_controller.go  | 10 +++++-----
 .../mongodb/replicaset_controller_test.go         | 16 +++++++---------
 pkg/controller/mongodb/replicaset_scaledown.go    |  2 +-
 .../mongodb/replicaset_scaledown_test.go          |  4 ++--
 test/e2e/mongodbtests/mongodbtests.go             |  2 +-
 .../replica_set_multiple_test.go                  |  4 ++--
 .../replica_set_readiness_probe_test.go           |  2 +-
 .../replica_set_scaling_test.go                   |  4 ++--
 .../replica_set_scale_down_test.go                |  2 +-
 12 files changed, 34 insertions(+), 36 deletions(-)

diff --git a/deploy/crds/mongodb.com_mongodb_crd.yaml b/deploy/crds/mongodb.com_mongodb_crd.yaml
index f2edee048..b7143e0e6 100644
--- a/deploy/crds/mongodb.com_mongodb_crd.yaml
+++ b/deploy/crds/mongodb.com_mongodb_crd.yaml
@@ -175,10 +175,10 @@ spec:
       status:
         description: MongoDBStatus defines the observed state of MongoDB
         properties:
-          currentReplicaSetMembers:
-            type: integer
           currentStatefulSetReplicas:
             type: integer
+          currentMongoDBMembers:
+            type: integer
           message:
             type: string
           mongoUri:
diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go
index 74f40c660..e87d46981 100644
--- a/pkg/apis/mongodb/v1/mongodb_types.go
+++ b/pkg/apis/mongodb/v1/mongodb_types.go
@@ -203,7 +203,7 @@ type MongoDBStatus struct {
 	Phase   Phase  `json:"phase"`
 
 	CurrentStatefulSetReplicas int `json:"currentStatefulSetReplicas"`
-	CurrentReplicaSetMembers   int `json:"currentReplicaSetMembers"`
+	
CurrentMongoDBMembers int `json:"currentMongoDBMembers"` Message string `json:"message,omitempty"` } @@ -228,7 +228,7 @@ func (m MongoDB) AutomationConfigMembersThisReconciliation() int { // based on our desired number, and our current number return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ desired: m.Spec.Members, - current: m.Status.CurrentReplicaSetMembers, + current: m.Status.CurrentMongoDBMembers, }) } diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go index 5298f7715..c0fcc9f66 100644 --- a/pkg/controller/mongodb/mongodb_status_options.go +++ b/pkg/controller/mongodb/mongodb_status_options.go @@ -97,9 +97,9 @@ func (m messageOption) GetResult() (reconcile.Result, error) { return result.OK() } -func (o *optionBuilder) withAutomationConfigMembers(members int) *optionBuilder { - o.options = append(o.options, automationConfigReplicasOption{ - replicaSetMembers: members, +func (o *optionBuilder) withMongoDBMembers(members int) *optionBuilder { + o.options = append(o.options, mongoDBReplicasOption{ + mongoDBMembers: members, }) return o } @@ -155,15 +155,15 @@ func (p phaseOption) GetResult() (reconcile.Result, error) { return result.OK() } -type automationConfigReplicasOption struct { - replicaSetMembers int +type mongoDBReplicasOption struct { + mongoDBMembers int } -func (a automationConfigReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { - mdb.Status.CurrentReplicaSetMembers = a.replicaSetMembers +func (a mongoDBReplicasOption) ApplyOption(mdb *mdbv1.MongoDB) { + mdb.Status.CurrentMongoDBMembers = a.mongoDBMembers } -func (a automationConfigReplicasOption) GetResult() (reconcile.Result, error) { +func (a mongoDBReplicasOption) GetResult() (reconcile.Result, error) { return result.OK() } diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go index 8865170a7..f2840ac49 100644 --- a/pkg/controller/mongodb/replica_set_controller.go +++ b/pkg/controller/mongodb/replica_set_controller.go @@ -283,7 +283,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). withFailedPhase(), ) @@ -298,7 +298,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error setting annotations: %s", err)). withFailedPhase(), ) @@ -307,7 +307,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if err := r.completeTLSRollout(mdb); err != nil { return status.Update(r.client.Status(), &mdb, statusOptions(). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Error, fmt.Sprintf("Error completing TLS rollout: %s", err)). 
withFailedPhase(), ) @@ -315,7 +315,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R if scale.IsStillScaling(mdb) { return status.Update(r.client.Status(), &mdb, statusOptions(). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicas(), mdb.DesiredReplicas())). withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). @@ -326,7 +326,7 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R res, err := status.Update(r.client.Status(), &mdb, statusOptions(). withMongoURI(mdb.MongoURI()). - withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()). + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). withMessage(None, ""). withRunningPhase(), diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go index 180bbd0dc..ee846a537 100644 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ b/pkg/controller/mongodb/replicaset_controller_test.go @@ -412,7 +412,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 5, mdb.Status.CurrentReplicaSetMembers) + assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) // scale members from five to three mdb.Spec.Members = 3 @@ -431,9 +431,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.CurrentReplicaSetMembers) - - makeStatefulSetReady(t, mgr.GetClient(), mdb) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -444,7 +442,7 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) - assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { @@ -457,7 +455,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) assert.NoError(t, err) - assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) // scale members from three to five mdb.Spec.Members = 5 @@ -475,7 +473,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. assert.NoError(t, err) assert.Equal(t, true, res.Requeue) - assert.Equal(t, 4, mdb.Status.CurrentReplicaSetMembers) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) makeStatefulSetReady(t, mgr.GetClient(), mdb) @@ -489,7 +487,7 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. 
 	assert.NoError(t, err)
 	assert.Equal(t, false, res.Requeue)
-	assert.Equal(t, 5, mdb.Status.CurrentReplicaSetMembers)
+	assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers)
 }
 
 func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDB) {
@@ -526,7 +524,7 @@ func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) {
 	err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb)
 	assert.NoError(t, err)
 
-	assert.Equal(t, 3, mdb.Status.CurrentReplicaSetMembers)
+	assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers)
 }
 
 func TestOpenshift_Configuration(t *testing.T) {
diff --git a/pkg/controller/mongodb/replicaset_scaledown.go b/pkg/controller/mongodb/replicaset_scaledown.go
index 257f28132..ebcfab1b3 100644
--- a/pkg/controller/mongodb/replicaset_scaledown.go
+++ b/pkg/controller/mongodb/replicaset_scaledown.go
@@ -16,7 +16,7 @@ import (
 func updateScalingStatus(statusWriter k8sClient.StatusWriter, mdb mdbv1.MongoDB) error {
 	_, err := status.Update(statusWriter, &mdb,
 		statusOptions().
-			withAutomationConfigMembers(mdb.AutomationConfigMembersThisReconciliation()).
+			withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()).
 			withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()),
 	)
 	return err
diff --git a/pkg/controller/mongodb/replicaset_scaledown_test.go b/pkg/controller/mongodb/replicaset_scaledown_test.go
index 0e4c7a7c7..bfd066c54 100644
--- a/pkg/controller/mongodb/replicaset_scaledown_test.go
+++ b/pkg/controller/mongodb/replicaset_scaledown_test.go
@@ -17,7 +17,7 @@ func TestUpdateScalingStatus(t *testing.T) {
 	mgr := client.NewManager(&mdb)
 
 	assert.Equal(t, 0, mdb.Status.CurrentStatefulSetReplicas)
-	assert.Equal(t, 0, mdb.Status.CurrentReplicaSetMembers)
+	assert.Equal(t, 0, mdb.Status.CurrentMongoDBMembers)
 
 	expectedAutomationConfigMembers := mdb.AutomationConfigMembersThisReconciliation()
 	expectedStatefulSetReplicas := mdb.StatefulSetReplicasThisReconciliation()
@@ -29,7 +29,7 @@
 	assert.NoError(t, err)
 
 	assert.Equal(t, expectedStatefulSetReplicas, mdb.Status.CurrentStatefulSetReplicas)
-	assert.Equal(t, expectedAutomationConfigMembers, mdb.Status.CurrentReplicaSetMembers)
+	assert.Equal(t, expectedAutomationConfigMembers, mdb.Status.CurrentMongoDBMembers)
 }
 
 func TestHasReachedDesiredNumberOfStatefulSetReplicasReady(t *testing.T) {
diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go
index 5e87c7081..0adc701dc 100644
--- a/test/e2e/mongodbtests/mongodbtests.go
+++ b/test/e2e/mongodbtests/mongodbtests.go
@@ -149,7 +149,7 @@ func BasicFunctionality(mdb *mdbv1.MongoDB) func(*testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI:                   mdb.MongoURI(),
 				Phase:                      mdbv1.Running,
-				CurrentReplicaSetMembers:   mdb.Spec.Members,
+				CurrentMongoDBMembers:      mdb.Spec.Members,
 				CurrentStatefulSetReplicas: mdb.Spec.Members,
 			}))
 }
diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
index 75f92451c..994ffc966 100644
--- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go
+++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
@@ -74,7 +74,7 @@ func TestReplicaSetMultiple(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI:                   mdb0.MongoURI(),
 				Phase:                      mdbv1.Running,
-				CurrentReplicaSetMembers:   5,
+				CurrentMongoDBMembers:      5,
 				CurrentStatefulSetReplicas: 5,
 			}))
 
@@ -87,7 +87,7 @@
 	// 			mdbv1.MongoDBStatus{
 	// 				MongoURI: mdb0.MongoURI(),
 	// 				Phase:    mdbv1.Running,
-	// 
CurrentReplicaSetMembers: 5, + // CurrentMongoDBMembers: 5, // CurrentStatefulSetReplicas: 5, // })) diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go index a0ab8d855..62e44a687 100644 --- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go +++ b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go @@ -54,7 +54,7 @@ func TestReplicaSetReadinessProbeScaling(t *testing.T) { mdbv1.MongoDBStatus{ MongoURI: mdb.MongoURI(), Phase: mdbv1.Running, - CurrentReplicaSetMembers: 3, + CurrentMongoDBMembers: 3, CurrentStatefulSetReplicas: 3, })) diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index fb94bc407..21dbff9be 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -51,7 +51,7 @@ func TestReplicaSetScaleUp(t *testing.T) { mdbv1.MongoDBStatus{ MongoURI: mdb.MongoURI(), Phase: mdbv1.Running, - CurrentReplicaSetMembers: 5, + CurrentMongoDBMembers: 5, CurrentStatefulSetReplicas: 5, })) @@ -64,7 +64,7 @@ func TestReplicaSetScaleUp(t *testing.T) { // mdbv1.MongoDBStatus{ // MongoURI: mdb.MongoURI(), // Phase: mdbv1.Running, - // CurrentReplicaSetMembers: 3, + // CurrentMongoDBMembers: 3, // CurrentStatefulSetReplicas: 3, // })) }) diff --git a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go index 6b32cb234..0580aedc1 100644 --- a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go +++ b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go @@ -53,7 +53,7 @@ func TestReplicaSetScaleDown(t *testing.T) { mdbv1.MongoDBStatus{ MongoURI: mdb.MongoURI(), Phase: mdbv1.Running, - CurrentReplicaSetMembers: 1, + CurrentMongoDBMembers: 1, CurrentStatefulSetReplicas: 1, })) })
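Taken together, these renames mean a user watching the resource sees both status counters converge on spec.members as the operator scales one member per reconciliation. The standalone sketch below replays the status progression a three-to-five member scale-up would publish under this series; it is illustrative only, mongoDBStatus is a hypothetical, simplified stand-in for the MongoDBStatus type (the real one carries json tags and a typed Phase), and the message text reuses the controller's "Performing scaling operation" format string from patch 39.

package main

import "fmt"

// mongoDBStatus is a simplified stand-in for the renamed MongoDBStatus
// fields; the real struct lives in pkg/apis/mongodb/v1.
type mongoDBStatus struct {
	Phase                      string
	CurrentStatefulSetReplicas int
	CurrentMongoDBMembers      int
	Message                    string
}

func main() {
	desired := 5
	current := 3
	// While scaling is in progress the controller publishes a Pending phase
	// and requeues; each pass adds one member until spec and status agree.
	for current != desired {
		next := current + 1
		fmt.Printf("%+v\n", mongoDBStatus{
			Phase:                      "Pending",
			CurrentStatefulSetReplicas: next,
			CurrentMongoDBMembers:      next,
			Message: fmt.Sprintf(
				"Performing scaling operation, currentMembers=%d, desiredMembers=%d",
				current, desired),
		})
		current = next
	}
	// Once the desired member count is reached the phase flips to Running
	// and the message is cleared.
	fmt.Printf("%+v\n", mongoDBStatus{
		Phase:                      "Running",
		CurrentStatefulSetReplicas: current,
		CurrentMongoDBMembers:      current,
	})
}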