diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go
index d44ca3293..72a1b81e8 100644
--- a/pkg/apis/mongodb/v1/mongodb_types.go
+++ b/pkg/apis/mongodb/v1/mongodb_types.go
@@ -24,6 +24,8 @@ type Phase string
 
 const (
 	Running Phase = "Running"
+	Failed  Phase = "Failed"
+	Pending Phase = "Pending"
 )
 
 const (
@@ -197,6 +199,8 @@ type AuthMode string
 type MongoDBStatus struct {
 	MongoURI string `json:"mongoUri"`
 	Phase    Phase  `json:"phase"`
+	Members  int    `json:"members"`
+	Message  string `json:"message,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -214,11 +218,6 @@ type MongoDB struct {
 	Status MongoDBStatus `json:"status,omitempty"`
 }
 
-func (m *MongoDB) UpdateSuccess() {
-	m.Status.MongoURI = m.MongoURI()
-	m.Status.Phase = Running
-}
-
 // MongoURI returns a mongo uri which can be used to connect to this deployment
 func (m MongoDB) MongoURI() string {
 	members := make([]string, m.Spec.Members)
@@ -229,17 +228,6 @@ func (m MongoDB) MongoURI() string {
 	return fmt.Sprintf("mongodb://%s", strings.Join(members, ","))
 }
 
-// TODO: this is a temporary function which will be used in the e2e tests
-// which will be removed in the following PR to clean up our mongo client testing
-func (m MongoDB) SCRAMMongoURI(username, password string) string {
-	members := make([]string, m.Spec.Members)
-	clusterDomain := "svc.cluster.local" // TODO: make this configurable
-	for i := 0; i < m.Spec.Members; i++ {
-		members[i] = fmt.Sprintf("%s-%d.%s.%s.%s:%d", m.Name, i, m.ServiceName(), m.Namespace, clusterDomain, 27017)
-	}
-	return fmt.Sprintf("mongodb://%s:%s@%s/?authMechanism=SCRAM-SHA-256", username, password, strings.Join(members, ","))
-}
-
 func (m MongoDB) Hosts() []string {
 	hosts := make([]string, m.Spec.Members)
 	clusterDomain := "svc.cluster.local" // TODO: make this configurable
diff --git a/pkg/controller/mongodb/mongodb_status_options.go b/pkg/controller/mongodb/mongodb_status_options.go
new file mode 100644
index 000000000..909a02877
--- /dev/null
+++ b/pkg/controller/mongodb/mongodb_status_options.go
@@ -0,0 +1,173 @@
+package mongodb
+
+import (
+	"time"
+
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+	"go.uber.org/zap"
+
+	"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// severity indicates the severity level
+// at which the message should be logged
+type severity string
+
+const (
+	Info  severity = "INFO"
+	Warn  severity = "WARN"
+	Error severity = "ERROR"
+	None  severity = "NONE"
+)
+
+// optionBuilder is in charge of constructing a slice of options that
+// will be applied on top of the MongoDB resource that has been provided
+type optionBuilder struct {
+	options []status.Option
+}
+
+// GetOptions implements the OptionBuilder interface
+func (o *optionBuilder) GetOptions() []status.Option {
+	return o.options
+}
+
+// statusOptions returns an initialized optionBuilder
+func statusOptions() *optionBuilder {
+	return &optionBuilder{
+		options: []status.Option{},
+	}
+}
+
+func (o *optionBuilder) withMongoURI(uri string) *optionBuilder {
+	o.options = append(o.options,
+		mongoUriOption{
+			mongoUri: uri,
+		})
+	return o
+}
+
+type mongoUriOption struct {
+	mongoUri string
+}
+
+func (m mongoUriOption) ApplyOption(mdb *mdbv1.MongoDB) {
+	mdb.Status.MongoURI = m.mongoUri
+}
+
+func (m mongoUriOption) GetResult() (reconcile.Result, error) {
+	return okResult()
+}
+
+func (o *optionBuilder) withMembers(members int) *optionBuilder {
+	o.options = append(o.options,
+		membersOption{
+			members: members,
+		})
+	return o
+}
+
+type membersOption struct {
+	members int
+}
+
+func (m membersOption) ApplyOption(mdb *mdbv1.MongoDB) {
+	mdb.Status.Members = m.members
+}
+
+func (m membersOption) GetResult() (reconcile.Result, error) {
+	return okResult()
+}
+
+func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder {
+	o.options = append(o.options,
+		phaseOption{
+			phase:      phase,
+			retryAfter: retryAfter,
+		})
+	return o
+}
+
+type message struct {
+	messageString string
+	severityLevel severity
+}
+
+type messageOption struct {
+	message message
+}
+
+func (m messageOption) ApplyOption(mdb *mdbv1.MongoDB) {
+	mdb.Status.Message = m.message.messageString
+	if m.message.severityLevel == Error {
+		zap.S().Error(m.message.messageString)
+	}
+	if m.message.severityLevel == Warn {
+		zap.S().Warn(m.message.messageString)
+	}
+	if m.message.severityLevel == Info {
+		zap.S().Info(m.message.messageString)
+	}
+}
+
+func (m messageOption) GetResult() (reconcile.Result, error) {
+	return okResult()
+}
+
+func (o *optionBuilder) withMessage(severityLevel severity, msg string) *optionBuilder {
+	o.options = append(o.options, messageOption{
+		message: message{
+			messageString: msg,
+			severityLevel: severityLevel,
+		},
+	})
+	return o
+}
+
+func (o *optionBuilder) withFailedPhase() *optionBuilder {
+	return o.withPhase(mdbv1.Failed, 0)
+}
+
+func (o *optionBuilder) withPendingPhase(retryAfter int) *optionBuilder {
+	return o.withPhase(mdbv1.Pending, retryAfter)
+}
+
+func (o *optionBuilder) withRunningPhase() *optionBuilder {
+	return o.withPhase(mdbv1.Running, -1)
+}
+
+type phaseOption struct {
+	phase      mdbv1.Phase
+	retryAfter int
+}
+
+func (p phaseOption) ApplyOption(mdb *mdbv1.MongoDB) {
+	mdb.Status.Phase = p.phase
+}
+
+func (p phaseOption) GetResult() (reconcile.Result, error) {
+	if p.phase == mdbv1.Running {
+		return okResult()
+	}
+	if p.phase == mdbv1.Pending {
+		return retryResult(p.retryAfter)
+	}
+	if p.phase == mdbv1.Failed {
+		return failedResult()
+	}
+	return okResult()
+}
+
+// helper functions which return reconciliation results which should be
+// returned from the main reconciliation loop
+
+func okResult() (reconcile.Result, error) {
+	return reconcile.Result{}, nil
+}
+
+func retryResult(after int) (reconcile.Result, error) {
+	return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil
+}
+
+func failedResult() (reconcile.Result, error) {
+	return retryResult(0)
+}
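
Note for reviewers: the builder above only accumulates status.Option values; nothing touches the cluster until status.Update (added in pkg/util/status further down) applies them and writes the status subresource. A minimal sketch of how the pieces compose, applying the options by hand the way the unit tests below do (the URI is a placeholder, not one the operator would generate):

	mdb := mdbv1.MongoDB{}
	opts := statusOptions().
		withMongoURI("mongodb://example-host:27017"). // placeholder value
		withMembers(3).
		withRunningPhase().
		GetOptions()
	for _, opt := range opts {
		opt.ApplyOption(&mdb) // each option mutates exactly one Status field
	}
	// mdb.Status.MongoURI, mdb.Status.Members and mdb.Status.Phase are now set.
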
diff --git a/pkg/controller/mongodb/mongodb_status_options_test.go b/pkg/controller/mongodb/mongodb_status_options_test.go
new file mode 100644
index 000000000..757732159
--- /dev/null
+++ b/pkg/controller/mongodb/mongodb_status_options_test.go
@@ -0,0 +1,73 @@
+package mongodb
+
+import (
+	"testing"
+
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMongoUriOption_ApplyOption(t *testing.T) {
+	mdb := newReplicaSet(3, "my-rs", "my-ns")
+
+	opt := mongoUriOption{
+		mongoUri: "my-uri",
+	}
+
+	opt.ApplyOption(&mdb)
+
+	assert.Equal(t, "my-uri", mdb.Status.MongoURI, "Status should be updated")
+}
+
+func TestMembersOption_ApplyOption(t *testing.T) {
+	mdb := newReplicaSet(3, "my-rs", "my-ns")
+
+	opt := membersOption{
+		members: 5,
+	}
+
+	opt.ApplyOption(&mdb)
+
+	assert.Equal(t, 3, mdb.Spec.Members, "Spec should remain unchanged")
+	assert.Equal(t, 5, mdb.Status.Members, "Status should be updated")
+}
+
+func TestOptionBuilder_RunningPhase(t *testing.T) {
+	mdb := newReplicaSet(3, "my-rs", "my-ns")
+
+	statusOptions().withRunningPhase().GetOptions()[0].ApplyOption(&mdb)
+
+	assert.Equal(t, mdbv1.Running, mdb.Status.Phase)
+}
+
+func TestOptionBuilder_PendingPhase(t *testing.T) {
+	mdb := newReplicaSet(3, "my-rs", "my-ns")
+
+	statusOptions().withPendingPhase(10).GetOptions()[0].ApplyOption(&mdb)
+
+	assert.Equal(t, mdbv1.Pending, mdb.Status.Phase)
+}
+
+func TestOptionBuilder_FailedPhase(t *testing.T) {
+	mdb := newReplicaSet(3, "my-rs", "my-ns")
+
+	statusOptions().withFailedPhase().GetOptions()[0].ApplyOption(&mdb)
+
+	assert.Equal(t, mdbv1.Failed, mdb.Status.Phase)
+}
+
+func newReplicaSet(members int, name, namespace string) mdbv1.MongoDB {
+	return mdbv1.MongoDB{
+		TypeMeta: metav1.TypeMeta{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: mdbv1.MongoDBSpec{
+			Members: members,
+		},
+	}
+}
diff --git a/pkg/controller/mongodb/replica_set_controller.go b/pkg/controller/mongodb/replica_set_controller.go
index 7de0a787d..811740ba9 100644
--- a/pkg/controller/mongodb/replica_set_controller.go
+++ b/pkg/controller/mongodb/replica_set_controller.go
@@ -8,9 +8,9 @@ import (
 	"io/ioutil"
 	"os"
 	"strings"
-	"time"
 
 	"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar"
+	"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status"
 
 	"github.com/pkg/errors"
 
@@ -177,56 +177,84 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R
 	r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status)
 	if err := r.ensureAutomationConfig(mdb); err != nil {
-		r.log.Warnf("error creating automation config config map: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("error creating automation config config map: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	r.log.Debug("Ensuring the service exists")
 	if err := r.ensureService(mdb); err != nil {
-		r.log.Warnf("Error ensuring the service exists: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error ensuring the service exists: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	isTLSValid, err := r.validateTLSConfig(mdb)
 	if err != nil {
-		r.log.Warnf("Error validating TLS config: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	if !isTLSValid {
-		r.log.Infof("TLS config is not yet valid, retrying in 10 seconds")
-		return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds").
+				withPendingPhase(10),
+		)
 	}
 
 	r.log.Debug("Creating/Updating StatefulSet")
 	if err := r.createOrUpdateStatefulSet(mdb); err != nil {
-		r.log.Warnf("Error creating/updating StatefulSet: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error creating/updating StatefulSet: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	currentSts := appsv1.StatefulSet{}
 	if err := r.client.Get(context.TODO(), mdb.NamespacedName(), &currentSts); err != nil {
+		errMsg := err.Error()
 		if !apiErrors.IsNotFound(err) {
-			r.log.Warnf("Error getting StatefulSet: %s", err)
+			errMsg = fmt.Sprintf("error getting StatefulSet: %s", err)
 		}
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb, statusOptions().
+			withMessage(Error, errMsg).
+			withFailedPhase(),
+		)
 	}
 
 	r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", getUpdateStrategyType(mdb))
 	ready, err := r.isStatefulSetReady(mdb, &currentSts)
 	if err != nil {
-		r.log.Warnf("Error checking StatefulSet status: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error checking StatefulSet status: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	if !ready {
-		r.log.Infof("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name)
-		return reconcile.Result{RequeueAfter: time.Second * 10}, nil
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Info, fmt.Sprintf("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name)).
+				withPendingPhase(10),
+		)
 	}
 
 	r.log.Debug("Resetting StatefulSet UpdateStrategy")
 	if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil {
-		r.log.Warnf("Error resetting StatefulSet UpdateStrategyType: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	r.log.Debug("Setting MongoDB Annotations")
@@ -236,24 +264,48 @@ func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.R
 		hasLeftReadyStateAnnotationKey: "false",
 	}
 	if err := r.setAnnotations(mdb.NamespacedName(), annotations); err != nil {
-		r.log.Warnf("Error setting annotations: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error setting annotations: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	if err := r.completeTLSRollout(mdb); err != nil {
-		r.log.Warnf("Error completing TLS rollout: %s", err)
-		return reconcile.Result{}, err
+		return status.Update(r.client.Status(), &mdb,
+			statusOptions().
+				withMessage(Error, fmt.Sprintf("Error completing TLS rollout: %s", err)).
+				withFailedPhase(),
+		)
 	}
 
 	r.log.Debug("Updating MongoDB Status")
-	newStatus, err := r.updateAndReturnStatusSuccess(&mdb)
+	if err := r.client.Get(context.TODO(), mdb.NamespacedName(), &mdb); err != nil {
+		return status.Update(r.client.Status(), &mdb, statusOptions().
+			withMessage(Error, fmt.Sprintf("could not get resource: %s", err)).
+			withFailedPhase(),
+		)
+	}
+
+	res, err := status.Update(r.client.Status(), &mdb, statusOptions().
+		withMongoURI(mdb.MongoURI()).
+		withMembers(mdb.Spec.Members).
+		withMessage(None, "").
+		withRunningPhase(),
+	)
+
 	if err != nil {
-		r.log.Warnf("Error updating the status of the MongoDB resource: %s", err)
-		return reconcile.Result{}, err
+		r.log.Errorf("Error updating the status of the MongoDB resource: %s", err)
+		return res, err
+	}
+
+	if res.RequeueAfter > 0 || res.Requeue {
+		r.log.Infow("Requeuing reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status:", mdb.Status)
+		return res, nil
 	}
 
-	r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", newStatus)
-	return reconcile.Result{}, nil
+	r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status:", mdb.Status)
+	return res, err
 }
 
 // resetStatefulSetUpdateStrategy ensures the stateful set is configured back to using RollingUpdateStatefulSetStrategyType
@@ -348,21 +400,6 @@ func (r ReplicaSetReconciler) setAnnotations(nsName types.NamespacedName, annota
 	})
 }
 
-// updateAndReturnStatusSuccess should be called after a successful reconciliation
-// the resource's status is updated to reflect to the state, and any other cleanup
-// operators should be performed here
-func (r ReplicaSetReconciler) updateAndReturnStatusSuccess(mdb *mdbv1.MongoDB) (mdbv1.MongoDBStatus, error) {
-	newMdb := &mdbv1.MongoDB{}
-	if err := r.client.Get(context.TODO(), mdb.NamespacedName(), newMdb); err != nil {
-		return mdbv1.MongoDBStatus{}, errors.Errorf("could not get get resource: %s", err)
-	}
-	newMdb.UpdateSuccess()
-	if err := r.client.Status().Update(context.TODO(), newMdb); err != nil {
-		return mdbv1.MongoDBStatus{}, errors.Errorf(fmt.Sprintf("could not update status: %s", err))
-	}
-	return newMdb.Status, nil
-}
-
 func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error {
 	s, err := r.buildAutomationConfigSecret(mdb)
 	if err != nil {
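
Note for reviewers: the controller rewrite above collapses each early-return path that used to pair an r.log.Warnf with a bare reconcile.Result into a single status.Update call, so the persisted status and the value returned from Reconcile can no longer disagree. Condensed shape of one pending path (message text paraphrased from the diff, not copied verbatim):

	return status.Update(r.client.Status(), &mdb,
		statusOptions().
			withMessage(Info, "StatefulSet is not yet ready, retrying in 10 seconds").
			withPendingPhase(10),
	)
	// Update writes Status.Message and Status.Phase, then the phaseOption's
	// GetResult yields reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}.
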
diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go
new file mode 100644
index 000000000..e0dc13158
--- /dev/null
+++ b/pkg/util/status/status.go
@@ -0,0 +1,52 @@
+package status
+
+import (
+	"context"
+
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type Option interface {
+	ApplyOption(mdb *mdbv1.MongoDB)
+	GetResult() (reconcile.Result, error)
+}
+
+type OptionBuilder interface {
+	GetOptions() []Option
+}
+
+// Update takes the options provided by the given option builder, applies them all and then updates the resource
+func Update(statusWriter client.StatusWriter, mdb *mdbv1.MongoDB, optionBuilder OptionBuilder) (reconcile.Result, error) {
+	options := optionBuilder.GetOptions()
+	for _, opt := range options {
+		opt.ApplyOption(mdb)
+	}
+
+	if err := statusWriter.Update(context.TODO(), mdb); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return determineReconciliationResult(options)
+}
+
+func determineReconciliationResult(options []Option) (reconcile.Result, error) {
+	// if there are any errors in any of our options, we return those first
+	for _, opt := range options {
+		res, err := opt.GetResult()
+		if err != nil {
+			return res, err
+		}
+	}
+	// otherwise we might need to re-queue
+	for _, opt := range options {
+		res, _ := opt.GetResult()
+		if res.Requeue || res.RequeueAfter > 0 {
+			return res, nil
+		}
+	}
+	// it was a successful reconciliation, nothing to do
+	return reconcile.Result{}, nil
+}
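
Note for reviewers: determineReconciliationResult establishes a clear precedence — any option reporting an error wins, then any option requesting a requeue, and only then plain success. One subtlety worth confirming is intentional: withFailedPhase routes through failedResult(), which mongodb_status_options.go defines as retryResult(0), so a Failed phase produces an immediate nil-error requeue rather than an error handed to controller-runtime:

	res, err := failedResult()
	// res == reconcile.Result{Requeue: true, RequeueAfter: 0}
	// err == nil — the Failed resource is retried right away instead of
	// being pushed through the controller's error backoff/rate limiter.
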
diff --git a/pkg/util/status/status_test.go b/pkg/util/status/status_test.go
new file mode 100644
index 000000000..b8b4baeb6
--- /dev/null
+++ b/pkg/util/status/status_test.go
@@ -0,0 +1,88 @@
+package status
+
+import (
+	"testing"
+	"time"
+
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type errorOption struct{}
+
+func (e errorOption) ApplyOption(_ *mdbv1.MongoDB) {}
+
+func (e errorOption) GetResult() (reconcile.Result, error) {
+	return reconcile.Result{}, errors.Errorf("error")
+}
+
+type successOption struct{}
+
+func (s successOption) ApplyOption(_ *mdbv1.MongoDB) {}
+
+func (s successOption) GetResult() (reconcile.Result, error) {
+	return reconcile.Result{}, nil
+}
+
+type retryOption struct{}
+
+func (r retryOption) ApplyOption(_ *mdbv1.MongoDB) {}
+
+func (r retryOption) GetResult() (reconcile.Result, error) {
+	return reconcile.Result{Requeue: true}, nil
+}
+
+func TestDetermineReconciliationResult(t *testing.T) {
+	t.Run("A single error option should result in an error return", func(t *testing.T) {
+		opts := []Option{
+			errorOption{},
+			successOption{},
+			successOption{},
+		}
+
+		res, err := determineReconciliationResult(opts)
+		assert.NotNil(t, err)
+		assert.Equal(t, false, res.Requeue)
+		assert.Equal(t, time.Duration(0), res.RequeueAfter)
+	})
+
+	t.Run("An error option takes precedence over a retry", func(t *testing.T) {
+		opts := []Option{
+			errorOption{},
+			retryOption{},
+			successOption{},
+		}
+		res, err := determineReconciliationResult(opts)
+		assert.NotNil(t, err)
+		assert.Equal(t, false, res.Requeue)
+		assert.Equal(t, time.Duration(0), res.RequeueAfter)
+	})
+
+	t.Run("No errors will result in a successful reconciliation", func(t *testing.T) {
+		opts := []Option{
+			successOption{},
+			successOption{},
+			successOption{},
+		}
+		res, err := determineReconciliationResult(opts)
+		assert.Nil(t, err)
+		assert.Equal(t, false, res.Requeue)
+		assert.Equal(t, time.Duration(0), res.RequeueAfter)
+	})
+
+	t.Run("A retry will take precedence over success", func(t *testing.T) {
+		opts := []Option{
+			successOption{},
+			successOption{},
+			retryOption{},
+		}
+		res, err := determineReconciliationResult(opts)
+		assert.Nil(t, err)
+		assert.Equal(t, true, res.Requeue)
+	})
+}
diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go
index a6c0a2fe2..fa49233a4 100644
--- a/test/e2e/mongodbtests/mongodbtests.go
+++ b/test/e2e/mongodbtests/mongodbtests.go
@@ -130,6 +130,7 @@ func BasicFunctionality(mdb *mdbv1.MongoDB) func(*testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  mdb.Spec.Members,
 			}))
 	}
 }
diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
index 55941e862..b9a61175b 100644
--- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go
+++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go
@@ -74,6 +74,7 @@ func TestReplicaSet(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb0.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  5,
 			}))
 	t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3))
 	t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb0))
@@ -83,6 +84,7 @@ func TestReplicaSet(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb0.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
index a091e3069..5174e7184 100644
--- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
+++ b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go
@@ -54,6 +54,7 @@ func TestReplicaSetReadinessProbeScaling(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go
index 6da5631c5..fc768a290 100644
--- a/test/e2e/replica_set_scale/replica_set_scaling_test.go
+++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go
@@ -52,6 +52,7 @@ func TestReplicaSetScale(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  5,
 			}))
 	t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3))
 	t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb))
@@ -61,6 +62,7 @@ func TestReplicaSetScale(t *testing.T) {
 			mdbv1.MongoDBStatus{
 				MongoURI: mdb.MongoURI(),
 				Phase:    mdbv1.Running,
+				Members:  3,
 			}))
 	})
 }