diff --git a/api/v1/mdb/mongodb_types.go b/api/v1/mdb/mongodb_types.go index a009deaa3..199146431 100644 --- a/api/v1/mdb/mongodb_types.go +++ b/api/v1/mdb/mongodb_types.go @@ -243,6 +243,9 @@ func GetLastAdditionalMongodConfigByType(lastSpec *MongoDbSpec, configType Addit // GetLastAdditionalMongodConfigByType returns the last successfully achieved AdditionalMongodConfigType for the given component. func (m *MongoDB) GetLastAdditionalMongodConfigByType(configType AdditionalMongodConfigType) (*AdditionalMongodConfig, error) { + if m.Spec.GetResourceType() == ReplicaSet { + panic(errors.Errorf("this method cannot be used from ReplicaSet controller; use non-method GetLastAdditionalMongodConfigByType and pass lastSpec from the deployment state.")) + } if m.Spec.GetResourceType() == ShardedCluster { panic(errors.Errorf("this method cannot be used from ShardedCluster controller; use non-method GetLastAdditionalMongodConfigByType and pass lastSpec from the deployment state.")) } diff --git a/controllers/om/deployment/testing_utils.go b/controllers/om/deployment/testing_utils.go index d62a1aca2..79f0ebf82 100644 --- a/controllers/om/deployment/testing_utils.go +++ b/controllers/om/deployment/testing_utils.go @@ -26,7 +26,7 @@ func CreateFromReplicaSet(mongoDBImage string, forceEnterprise bool, rs *mdb.Mon ), zap.S()) d := om.NewDeployment() - lastConfig, err := rs.GetLastAdditionalMongodConfigByType(mdb.ReplicaSetConfig) + lastConfig, err := mdb.GetLastAdditionalMongodConfigByType(nil, mdb.ReplicaSetConfig) if err != nil { panic(err) } diff --git a/controllers/operator/mongodbreplicaset_controller.go b/controllers/operator/mongodbreplicaset_controller.go index bb30016a9..e41c328a2 100644 --- a/controllers/operator/mongodbreplicaset_controller.go +++ b/controllers/operator/mongodbreplicaset_controller.go @@ -64,7 +64,9 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/vault/vaultwatcher" ) -// ReconcileMongoDbReplicaSet reconciles a MongoDB with a type of ReplicaSet +// ReconcileMongoDbReplicaSet reconciles a MongoDB with a type of ReplicaSet. +// WARNING: do not put any mutable state into this struct. +// Controller runtime uses and shares a single instance of it. type ReconcileMongoDbReplicaSet struct { *ReconcileCommonController omConnectionFactory om.ConnectionFactory @@ -76,60 +78,134 @@ type ReconcileMongoDbReplicaSet struct { databaseNonStaticImageVersion string } +type ReplicaSetDeploymentState struct { + LastAchievedSpec *mdbv1.MongoDbSpec `json:"lastAchievedSpec"` + MemberCountBefore int `json:"memberCountBefore"` +} + var _ reconcile.Reconciler = &ReconcileMongoDbReplicaSet{} -func newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet { - return &ReconcileMongoDbReplicaSet{ - ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), - omConnectionFactory: omFunc, - imageUrls: imageUrls, - forceEnterprise: forceEnterprise, - enableClusterMongoDBRoles: enableClusterMongoDBRoles, +// ReplicaSetReconcilerHelper contains state and logic for a SINGLE reconcile execution. +// This object is NOT shared between reconcile invocations. 
+type ReplicaSetReconcilerHelper struct { + resource *mdbv1.MongoDB + deploymentState *ReplicaSetDeploymentState + reconciler *ReconcileMongoDbReplicaSet + log *zap.SugaredLogger +} - initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, - databaseNonStaticImageVersion: databaseNonStaticImageVersion, +func (r *ReconcileMongoDbReplicaSet) newReconcilerHelper( + ctx context.Context, + rs *mdbv1.MongoDB, + log *zap.SugaredLogger, +) (*ReplicaSetReconcilerHelper, error) { + helper := &ReplicaSetReconcilerHelper{ + resource: rs, + reconciler: r, + log: log, + } + + if err := helper.initialize(ctx); err != nil { + return nil, err } + + return helper, nil } -type deploymentOptionsRS struct { - agentCertPath string - agentCertHash string - prometheusCertHash string - currentAgentAuthMode string +// readState abstracts reading the state of the resource that we store on the cluster between reconciliations. +func (r *ReplicaSetReconcilerHelper) readState() (*ReplicaSetDeploymentState, error) { + // Try to get the last achieved spec from annotations and store it in state + lastAchievedSpec, err := r.resource.GetLastSpec() + if err != nil { + return nil, err + } + + // Read current member count from Status once at initialization. This provides a stable view throughout + // reconciliation and prepares for eventually storing this in ConfigMap state instead of ephemeral status. + memberCountBefore := r.resource.Status.Members + + return &ReplicaSetDeploymentState{ + LastAchievedSpec: lastAchievedSpec, + MemberCountBefore: memberCountBefore, + }, nil } -// Generic Kubernetes Resources -// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=list;watch,namespace=placeholder -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch,namespace=placeholder -// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update,namespace=placeholder -// +kubebuilder:rbac:groups=core,resources={secrets,configmaps},verbs=get;list;watch;create;delete;update,namespace=placeholder -// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;get;list;watch;delete;update,namespace=placeholder +// writeState writes the state of the resource that we store on the cluster between reconciliations.
+// This includes the lastAchievedSpec annotation (always written) and vault secret version annotations on successful +// reconciliation. +func (r *ReplicaSetReconcilerHelper) writeState(ctx context.Context, includeVaultAnnotations bool) error { + // Serialize the state to annotations + annotationsToAdd, err := getAnnotationsForResource(r.resource) + if err != nil { + return err + } -// MongoDB Resource -// +kubebuilder:rbac:groups=mongodb.com,resources={mongodb,mongodb/status,mongodb/finalizers},verbs=*,namespace=placeholder + if includeVaultAnnotations && vault.IsVaultSecretBackend() { + secrets := r.resource.GetSecretsMountedIntoDBPod() + vaultMap := make(map[string]string) + for _, s := range secrets { + path := fmt.Sprintf("%s/%s/%s", r.reconciler.VaultClient.DatabaseSecretMetadataPath(), r.resource.Namespace, s) + vaultMap = merge.StringToStringMap(vaultMap, r.reconciler.VaultClient.GetSecretAnnotation(path)) + } + path := fmt.Sprintf("%s/%s/%s", r.reconciler.VaultClient.OperatorScretMetadataPath(), r.resource.Namespace, r.resource.Spec.Credentials) + vaultMap = merge.StringToStringMap(vaultMap, r.reconciler.VaultClient.GetSecretAnnotation(path)) + for k, val := range vaultMap { + annotationsToAdd[k] = val + } + } -// Setting up a webhook -// +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations,verbs=get;create;update;delete + // Write annotations back to the resource + if err := annotations.SetAnnotations(ctx, r.resource, annotationsToAdd, r.reconciler.client); err != nil { + return err + } -// Certificate generation -// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests,verbs=get;create;list;watch + if includeVaultAnnotations { + r.log.Debugf("Successfully wrote deployment state and vault annotations for ReplicaSet %s/%s", r.resource.Namespace, r.resource.Name) + } else { + r.log.Debugf("Successfully wrote deployment state for ReplicaSet %s/%s", r.resource.Namespace, r.resource.Name) + } + return nil +} -// Reconcile reads that state of the cluster for a MongoDbReplicaSet object and makes changes based on the state read -// and what is in the MongoDbReplicaSet.Spec -func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, e error) { - // === 1. Initial Checks and setup - log := zap.S().With("ReplicaSet", request.NamespacedName) - rs := &mdbv1.MongoDB{} +func (r *ReplicaSetReconcilerHelper) initialize(ctx context.Context) error { + state, err := r.readState() + if err != nil { + return xerrors.Errorf("Failed to initialize replica set state: %w", err) + } + r.deploymentState = state + return nil +} - if reconcileResult, err := r.prepareResourceForReconciliation(ctx, request, rs, log); err != nil { - if errors.IsNotFound(err) { - return workflow.Invalid("Object for reconciliation not found").ReconcileResult() - } - return reconcileResult, err +// updateStatus overrides the common controller's updateStatus to ensure that the deployment state +// is written after every status update. This ensures state consistency even on early returns. +// Vault annotations are only included when status.IsOK() is true, meaning reconciliation succeeded.
+// This function must be executed only once per reconcile, and its result must be returned to the caller. +func (r *ReplicaSetReconcilerHelper) updateStatus(ctx context.Context, status workflow.Status, statusOptions ...mdbstatus.Option) (reconcile.Result, error) { + result, err := r.reconciler.updateStatus(ctx, r.resource, status, r.log, statusOptions...) + if err != nil { + return result, err + } + + // Write deployment state after every status update + // Include vault annotations only on successful reconciliation + if err := r.writeState(ctx, status.IsOK()); err != nil { + return r.reconciler.updateStatus(ctx, r.resource, workflow.Failed(xerrors.Errorf("Failed to write deployment state after updating status: %w", err)), r.log) } + return result, nil +} + +// Reconcile performs the full reconciliation logic for a replica set. +// This is the main entry point for all reconciliation work and contains all +// state and logic specific to a single reconcile execution. +func (r *ReplicaSetReconcilerHelper) Reconcile(ctx context.Context) (reconcile.Result, error) { + rs := r.resource + log := r.log + reconciler := r.reconciler + + // === 1. Initial Checks and setup if !architectures.IsRunningStaticArchitecture(rs.Annotations) { - agents.UpgradeAllIfNeeded(ctx, agents.ClientSecret{Client: r.client, SecretClient: r.SecretClient}, r.omConnectionFactory, GetWatchedNamespace(), false) + agents.UpgradeAllIfNeeded(ctx, agents.ClientSecret{Client: reconciler.client, SecretClient: reconciler.SecretClient}, reconciler.omConnectionFactory, GetWatchedNamespace(), false) } log.Info("-> ReplicaSet.Reconcile") @@ -137,48 +213,47 @@ func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reco log.Infow("ReplicaSet.Status", "status", rs.Status) if err := rs.ProcessValidationsOnReconcile(nil); err != nil { - return r.updateStatus(ctx, rs, workflow.Invalid("%s", err.Error()), log) + return r.updateStatus(ctx, workflow.Invalid("%s", err.Error())) } - projectConfig, credsConfig, err := project.ReadConfigAndCredentials(ctx, r.client, r.SecretClient, rs, log) + projectConfig, credsConfig, err := project.ReadConfigAndCredentials(ctx, reconciler.client, reconciler.SecretClient, rs, log) if err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(err), log) + return r.updateStatus(ctx, workflow.Failed(err)) } - conn, _, err := connection.PrepareOpsManagerConnection(ctx, r.SecretClient, projectConfig, credsConfig, r.omConnectionFactory, rs.Namespace, log) + conn, _, err := connection.PrepareOpsManagerConnection(ctx, reconciler.SecretClient, projectConfig, credsConfig, reconciler.omConnectionFactory, rs.Namespace, log) if err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(xerrors.Errorf("Failed to prepare Ops Manager connection: %w", err)), log) + return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("Failed to prepare Ops Manager connection: %w", err))) } if status := ensureSupportedOpsManagerVersion(conn); status.Phase() != mdbstatus.PhaseRunning { - return r.updateStatus(ctx, rs, status, log) + return r.updateStatus(ctx, status) } - r.SetupCommonWatchers(rs, nil, nil, rs.Name) + reconciler.SetupCommonWatchers(rs, nil, nil, rs.Name) reconcileResult := checkIfHasExcessProcesses(conn, rs.Name, log) if !reconcileResult.IsOK() { - return r.updateStatus(ctx, rs, reconcileResult, log) + return r.updateStatus(ctx, reconcileResult) } if status := validateMongoDBResource(rs, conn); !status.IsOK() { - return r.updateStatus(ctx, rs, status, log) + return r.updateStatus(ctx, status) } if status := 
controlledfeature.EnsureFeatureControls(*rs, conn, conn.OpsManagerVersion(), log); !status.IsOK() { - return r.updateStatus(ctx, rs, status, log) + return r.updateStatus(ctx, status) } // === 2. Auth and Certificates - // Get certificate paths for later use rsCertsConfig := certs.ReplicaSetConfig(*rs) var databaseSecretPath string - if r.VaultClient != nil { - databaseSecretPath = r.VaultClient.DatabaseSecretPath() + if reconciler.VaultClient != nil { + databaseSecretPath = reconciler.VaultClient.DatabaseSecretPath() } - tlsCertHash := enterprisepem.ReadHashFromSecret(ctx, r.SecretClient, rs.Namespace, rsCertsConfig.CertSecretName, databaseSecretPath, log) - internalClusterCertHash := enterprisepem.ReadHashFromSecret(ctx, r.SecretClient, rs.Namespace, rsCertsConfig.InternalClusterSecretName, databaseSecretPath, log) + tlsCertHash := enterprisepem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, rsCertsConfig.CertSecretName, databaseSecretPath, log) + internalClusterCertHash := enterprisepem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, rsCertsConfig.InternalClusterSecretName, databaseSecretPath, log) tlsCertPath := "" internalClusterCertPath := "" @@ -190,22 +265,22 @@ func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reco } agentCertSecretName := rs.GetSecurity().AgentClientCertificateSecretName(rs.Name) - agentCertHash, agentCertPath := r.agentCertHashAndPath(ctx, log, rs.Namespace, agentCertSecretName, databaseSecretPath) + agentCertHash, agentCertPath := reconciler.agentCertHashAndPath(ctx, log, rs.Namespace, agentCertSecretName, databaseSecretPath) - prometheusCertHash, err := certs.EnsureTLSCertsForPrometheus(ctx, r.SecretClient, rs.GetNamespace(), rs.GetPrometheus(), certs.Database, log) + prometheusCertHash, err := certs.EnsureTLSCertsForPrometheus(ctx, reconciler.SecretClient, rs.GetNamespace(), rs.GetPrometheus(), certs.Database, log) if err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(xerrors.Errorf("could not generate certificates for Prometheus: %w", err)), log) + return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("Could not generate certificates for Prometheus: %w", err))) } currentAgentAuthMode, err := conn.GetAgentAuthMode() if err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(xerrors.Errorf("failed to get agent auth mode: %w", err)), log) + return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("failed to get agent auth mode: %w", err))) } // Check if we need to prepare for scale-down - if scale.ReplicasThisReconciliation(rs) < rs.Status.Members { + if scale.ReplicasThisReconciliation(rs) < r.deploymentState.MemberCountBefore { if err := replicaset.PrepareScaleDownFromMongoDB(conn, rs, log); err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(xerrors.Errorf("Failed to prepare Replica Set for scaling down using Ops Manager: %w", err)), log) + return r.updateStatus(ctx, workflow.Failed(xerrors.Errorf("Failed to prepare Replica Set for scaling down using Ops Manager: %w", err))) } } deploymentOpts := deploymentOptionsRS{ @@ -218,17 +293,16 @@ func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reco // 3. Search Overrides // Apply search overrides early so searchCoordinator role is present before ensureRoles runs // This must happen before the ordering logic to ensure roles are synced regardless of order - shouldMirrorKeyfileForMongot := r.applySearchOverrides(ctx, rs, log) + shouldMirrorKeyfileForMongot := r.applySearchOverrides(ctx) // 4. 
Recovery - // Recovery prevents some deadlocks that can occur during reconciliation, e.g. the setting of an incorrect automation // configuration and a subsequent attempt to overwrite it later, the operator would be stuck in Pending phase. // See CLOUDP-189433 and CLOUDP-229222 for more details. if recovery.ShouldTriggerRecovery(rs.Status.Phase != mdbstatus.PhaseRunning, rs.Status.LastTransition) { log.Warnf("Triggering Automatic Recovery. The MongoDB resource %s/%s is in %s state since %s", rs.Namespace, rs.Name, rs.Status.Phase, rs.Status.LastTransition) - automationConfigStatus := r.updateOmDeploymentRs(ctx, conn, rs.Status.Members, rs, log, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, true).OnErrorPrepend("Failed to create/update (Ops Manager reconciliation phase):") - reconcileStatus := r.reconcileMemberResources(ctx, rs, conn, log, projectConfig, deploymentOpts) + automationConfigStatus := r.updateOmDeploymentRs(ctx, conn, r.deploymentState.MemberCountBefore, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, true).OnErrorPrepend("Failed to create/update (Ops Manager reconciliation phase):") + reconcileStatus := r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) if !reconcileStatus.IsOK() { log.Errorf("Recovery failed because of reconcile errors, %v", reconcileStatus) } @@ -238,54 +312,85 @@ func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reco } // 5. Actual reconciliation execution, Ops Manager and kubernetes resources update - lastSpec, err := rs.GetLastSpec() - if err != nil { - lastSpec = &mdbv1.MongoDbSpec{} - } - - publishAutomationConfigFirst := publishAutomationConfigFirstRS(ctx, r.client, *rs, lastSpec, deploymentOpts.currentAgentAuthMode, projectConfig.SSLMMSCAConfigMap, log) + publishAutomationConfigFirst := publishAutomationConfigFirstRS(ctx, reconciler.client, *rs, r.deploymentState.LastAchievedSpec, deploymentOpts.currentAgentAuthMode, projectConfig.SSLMMSCAConfigMap, log) status := workflow.RunInGivenOrder(publishAutomationConfigFirst, func() workflow.Status { - return r.updateOmDeploymentRs(ctx, conn, rs.Status.Members, rs, log, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, false).OnErrorPrepend("Failed to create/update (Ops Manager reconciliation phase):") + return r.updateOmDeploymentRs(ctx, conn, r.deploymentState.MemberCountBefore, tlsCertPath, internalClusterCertPath, deploymentOpts, shouldMirrorKeyfileForMongot, false).OnErrorPrepend("Failed to create/update (Ops Manager reconciliation phase):") }, func() workflow.Status { - return r.reconcileMemberResources(ctx, rs, conn, log, projectConfig, deploymentOpts) + return r.reconcileMemberResources(ctx, conn, projectConfig, deploymentOpts) }) if !status.IsOK() { - return r.updateStatus(ctx, rs, status, log) + return r.updateStatus(ctx, status) } // === 6. 
Final steps if scale.IsStillScaling(rs) { - return r.updateStatus(ctx, rs, workflow.Pending("Continuing scaling operation for ReplicaSet %s, desiredMembers=%d, currentMembers=%d", rs.ObjectKey(), rs.DesiredReplicas(), scale.ReplicasThisReconciliation(rs)), log, mdbstatus.MembersOption(rs)) + return r.updateStatus(ctx, workflow.Pending("Continuing scaling operation for ReplicaSet %s, desiredMembers=%d, currentMembers=%d", rs.ObjectKey(), rs.DesiredReplicas(), scale.ReplicasThisReconciliation(rs)), mdbstatus.MembersOption(rs)) } - annotationsToAdd, err := getAnnotationsForResource(rs) - if err != nil { - return r.updateStatus(ctx, rs, workflow.Failed(err), log) + log.Infof("Finished reconciliation for MongoDbReplicaSet! %s", completionMessage(conn.BaseURL(), conn.GroupID())) + return r.updateStatus(ctx, workflow.OK(), mdbstatus.NewBaseUrlOption(deployment.Link(conn.BaseURL(), conn.GroupID())), mdbstatus.MembersOption(rs), mdbstatus.NewPVCsStatusOptionEmptyStatus()) +} + +func newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet { + return &ReconcileMongoDbReplicaSet{ + ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), + omConnectionFactory: omFunc, + imageUrls: imageUrls, + forceEnterprise: forceEnterprise, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, + + initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, + databaseNonStaticImageVersion: databaseNonStaticImageVersion, } +} - if vault.IsVaultSecretBackend() { - secrets := rs.GetSecretsMountedIntoDBPod() - vaultMap := make(map[string]string) - for _, s := range secrets { - path := fmt.Sprintf("%s/%s/%s", r.VaultClient.DatabaseSecretMetadataPath(), rs.Namespace, s) - vaultMap = merge.StringToStringMap(vaultMap, r.VaultClient.GetSecretAnnotation(path)) - } - path := fmt.Sprintf("%s/%s/%s", r.VaultClient.OperatorScretMetadataPath(), rs.Namespace, rs.Spec.Credentials) - vaultMap = merge.StringToStringMap(vaultMap, r.VaultClient.GetSecretAnnotation(path)) - for k, val := range vaultMap { - annotationsToAdd[k] = val +type deploymentOptionsRS struct { + agentCertPath string + agentCertHash string + prometheusCertHash string + currentAgentAuthMode string +} + +// Generic Kubernetes Resources +// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=list;watch,namespace=placeholder +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch,namespace=placeholder +// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update,namespace=placeholder +// +kubebuilder:rbac:groups=core,resources={secrets,configmaps},verbs=get;list;watch;create;delete;update,namespace=placeholder +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=create;get;list;watch;delete;update,namespace=placeholder + +// MongoDB Resource +// +kubebuilder:rbac:groups=mongodb.com,resources={mongodb,mongodb/status,mongodb/finalizers},verbs=*,namespace=placeholder + +// Setting up a webhook +// +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations,verbs=get;create;update;delete + +// Certificate generation +// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests,verbs=get;create;list;watch + +// Reconcile reads the state of the cluster for a MongoDbReplicaSet object and makes changes based on 
the state read +// and what is in the MongoDbReplicaSet.Spec +func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, e error) { + log := zap.S().With("ReplicaSet", request.NamespacedName) + rs := &mdbv1.MongoDB{} + + if reconcileResult, err := r.prepareResourceForReconciliation(ctx, request, rs, log); err != nil { + if errors.IsNotFound(err) { + return workflow.Invalid("Object for reconciliation not found").ReconcileResult() } + return reconcileResult, err } - if err := annotations.SetAnnotations(ctx, rs, annotationsToAdd, r.client); err != nil { + // Create helper for THIS reconciliation + helper, err := r.newReconcilerHelper(ctx, rs, log) + if err != nil { return r.updateStatus(ctx, rs, workflow.Failed(err), log) } - log.Infof("Finished reconciliation for MongoDbReplicaSet! %s", completionMessage(conn.BaseURL(), conn.GroupID())) - return r.updateStatus(ctx, rs, workflow.OK(), log, mdbstatus.NewBaseUrlOption(deployment.Link(conn.BaseURL(), conn.GroupID())), mdbstatus.MembersOption(rs), mdbstatus.NewPVCsStatusOptionEmptyStatus()) + // Delegate all reconciliation logic to helper + return helper.Reconcile(ctx) } func publishAutomationConfigFirstRS(ctx context.Context, getter kubernetesClient.Client, mdb mdbv1.MongoDB, lastSpec *mdbv1.MongoDbSpec, currentAgentAuthMode string, sslMMSCAConfigMap string, log *zap.SugaredLogger) bool { @@ -339,7 +444,7 @@ func publishAutomationConfigFirstRS(ctx context.Context, getter kubernetesClient return false } -func getHostnameOverrideConfigMapForReplicaset(mdb mdbv1.MongoDB) corev1.ConfigMap { +func getHostnameOverrideConfigMapForReplicaset(mdb *mdbv1.MongoDB) corev1.ConfigMap { data := make(map[string]string) if mdb.Spec.DbCommonSpec.GetExternalDomain() != nil { @@ -359,12 +464,12 @@ func getHostnameOverrideConfigMapForReplicaset(mdb mdbv1.MongoDB) corev1.ConfigM return cm } -func (r *ReconcileMongoDbReplicaSet) reconcileHostnameOverrideConfigMap(ctx context.Context, log *zap.SugaredLogger, getUpdateCreator configmap.GetUpdateCreator, mdb mdbv1.MongoDB) error { - if mdb.Spec.DbCommonSpec.GetExternalDomain() == nil { +func (r *ReplicaSetReconcilerHelper) reconcileHostnameOverrideConfigMap(ctx context.Context, log *zap.SugaredLogger, getUpdateCreator configmap.GetUpdateCreator) error { + if r.resource.Spec.DbCommonSpec.GetExternalDomain() == nil { return nil } - cm := getHostnameOverrideConfigMapForReplicaset(mdb) + cm := getHostnameOverrideConfigMapForReplicaset(r.resource) err := configmap.CreateOrUpdate(ctx, getUpdateCreator, cm) if err != nil && !errors.IsAlreadyExists(err) { return xerrors.Errorf("failed to create configmap: %s, err: %w", cm.Name, err) @@ -377,38 +482,42 @@ func (r *ReconcileMongoDbReplicaSet) reconcileHostnameOverrideConfigMap(ctx cont // reconcileMemberResources handles the synchronization of kubernetes resources, which can be statefulsets, services etc. // All the resources required in the k8s cluster (as opposed to the automation config) for creating the replicaset // should be reconciled in this method. 
-func (r *ReconcileMongoDbReplicaSet) reconcileMemberResources(ctx context.Context, rs *mdbv1.MongoDB, conn om.Connection, - log *zap.SugaredLogger, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS, -) workflow.Status { +func (r *ReplicaSetReconcilerHelper) reconcileMemberResources(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS) workflow.Status { + rs := r.resource + reconciler := r.reconciler + log := r.log + // Reconcile hostname override ConfigMap - if err := r.reconcileHostnameOverrideConfigMap(ctx, log, r.client, *rs); err != nil { + if err := r.reconcileHostnameOverrideConfigMap(ctx, log, r.reconciler.client); err != nil { return workflow.Failed(xerrors.Errorf("Failed to reconcileHostnameOverrideConfigMap: %w", err)) } // Ensure roles are properly configured - if status := r.ensureRoles(ctx, rs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), log); !status.IsOK() { + if status := reconciler.ensureRoles(ctx, rs.Spec.DbCommonSpec, reconciler.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), log); !status.IsOK() { return status } - return r.reconcileStatefulSet(ctx, rs, log, conn, projectConfig, deploymentOptions) + return r.reconcileStatefulSet(ctx, conn, projectConfig, deploymentOptions) } -func (r *ReconcileMongoDbReplicaSet) reconcileStatefulSet(ctx context.Context, rs *mdbv1.MongoDB, - log *zap.SugaredLogger, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS, -) workflow.Status { - certConfigurator := certs.ReplicaSetX509CertConfigurator{MongoDB: rs, SecretClient: r.SecretClient} - status := r.ensureX509SecretAndCheckTLSType(ctx, certConfigurator, deploymentOptions.currentAgentAuthMode, log) +func (r *ReplicaSetReconcilerHelper) reconcileStatefulSet(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS) workflow.Status { + rs := r.resource + reconciler := r.reconciler + log := r.log + + certConfigurator := certs.ReplicaSetX509CertConfigurator{MongoDB: rs, SecretClient: reconciler.SecretClient} + status := reconciler.ensureX509SecretAndCheckTLSType(ctx, certConfigurator, deploymentOptions.currentAgentAuthMode, log) if !status.IsOK() { return status } - status = certs.EnsureSSLCertsForStatefulSet(ctx, r.SecretClient, r.SecretClient, *rs.Spec.Security, certs.ReplicaSetConfig(*rs), log) + status = certs.EnsureSSLCertsForStatefulSet(ctx, reconciler.SecretClient, reconciler.SecretClient, *rs.Spec.Security, certs.ReplicaSetConfig(*rs), log) if !status.IsOK() { return status } // Build the replica set config - rsConfig, err := r.buildStatefulSetOptions(ctx, rs, conn, projectConfig, deploymentOptions.currentAgentAuthMode, deploymentOptions.prometheusCertHash, deploymentOptions.agentCertHash, log) + rsConfig, err := r.buildStatefulSetOptions(ctx, conn, projectConfig, deploymentOptions) if err != nil { return workflow.Failed(xerrors.Errorf("failed to build StatefulSet options: %w", err)) } @@ -416,21 +525,17 @@ func (r *ReconcileMongoDbReplicaSet) reconcileStatefulSet(ctx context.Context, r sts := construct.DatabaseStatefulSet(*rs, rsConfig, log) // Handle PVC resize if needed - workflowStatus := create.HandlePVCResize(ctx, r.client, &sts, log) - if !workflowStatus.IsOK() { + if workflowStatus := r.handlePVCResize(ctx, &sts); !workflowStatus.IsOK() { return workflowStatus } - if workflow.ContainsPVCOption(workflowStatus.StatusOptions()) { - _, _ = 
r.updateStatus(ctx, rs, workflow.Pending(""), log, workflowStatus.StatusOptions()...) - } // Create or update the StatefulSet in Kubernetes - if err := create.DatabaseInKubernetes(ctx, r.client, *rs, sts, rsConfig, log); err != nil { + if err := create.DatabaseInKubernetes(ctx, reconciler.client, *rs, sts, rsConfig, log); err != nil { return workflow.Failed(xerrors.Errorf("Failed to create/update (Kubernetes reconciliation phase): %w", err)) } // Check StatefulSet status - if status := statefulset.GetStatefulSetStatus(ctx, rs.Namespace, rs.Name, r.client); !status.IsOK() { + if status := statefulset.GetStatefulSetStatus(ctx, rs.Namespace, rs.Name, reconciler.client); !status.IsOK() { return status } @@ -438,15 +543,33 @@ func (r *ReconcileMongoDbReplicaSet) reconcileStatefulSet(ctx context.Context, r return workflow.OK() } +func (r *ReplicaSetReconcilerHelper) handlePVCResize(ctx context.Context, sts *appsv1.StatefulSet) workflow.Status { + workflowStatus := create.HandlePVCResize(ctx, r.reconciler.client, sts, r.log) + if !workflowStatus.IsOK() { + return workflowStatus + } + + if workflow.ContainsPVCOption(workflowStatus.StatusOptions()) { + if _, err := r.reconciler.updateStatus(ctx, r.resource, workflow.Pending(""), r.log, workflowStatus.StatusOptions()...); err != nil { + return workflow.Failed(xerrors.Errorf("error updating status: %w", err)) + } + } + return workflow.OK() +} + // buildStatefulSetOptions creates the options needed for constructing the StatefulSet -func (r *ReconcileMongoDbReplicaSet) buildStatefulSetOptions(ctx context.Context, rs *mdbv1.MongoDB, conn om.Connection, projectConfig mdbv1.ProjectConfig, currentAgentAuthMode string, prometheusCertHash string, agentCertHash string, log *zap.SugaredLogger) (func(mdb mdbv1.MongoDB) construct.DatabaseStatefulSetOptions, error) { +func (r *ReplicaSetReconcilerHelper) buildStatefulSetOptions(ctx context.Context, conn om.Connection, projectConfig mdbv1.ProjectConfig, deploymentOptions deploymentOptionsRS) (func(mdb mdbv1.MongoDB) construct.DatabaseStatefulSetOptions, error) { + rs := r.resource + reconciler := r.reconciler + log := r.log + rsCertsConfig := certs.ReplicaSetConfig(*rs) var vaultConfig vault.VaultConfiguration var databaseSecretPath string - if r.VaultClient != nil { - vaultConfig = r.VaultClient.VaultConfig - databaseSecretPath = r.VaultClient.DatabaseSecretPath() + if reconciler.VaultClient != nil { + vaultConfig = reconciler.VaultClient.VaultConfig + databaseSecretPath = reconciler.VaultClient.DatabaseSecretPath() } // Determine automation agent version for static architecture @@ -456,30 +579,30 @@ func (r *ReconcileMongoDbReplicaSet) buildStatefulSetOptions(ctx context.Context // happens after creating the StatefulSet definition. 
if !rs.IsAgentImageOverridden() { var err error - automationAgentVersion, err = r.getAgentVersion(conn, conn.OpsManagerVersion().VersionString, false, log) + automationAgentVersion, err = reconciler.getAgentVersion(conn, conn.OpsManagerVersion().VersionString, false, log) if err != nil { return nil, xerrors.Errorf("Impossible to get agent version, please override the agent image by providing a pod template: %w", err) } } } - tlsCertHash := enterprisepem.ReadHashFromSecret(ctx, r.SecretClient, rs.Namespace, rsCertsConfig.CertSecretName, databaseSecretPath, log) - internalClusterCertHash := enterprisepem.ReadHashFromSecret(ctx, r.SecretClient, rs.Namespace, rsCertsConfig.InternalClusterSecretName, databaseSecretPath, log) + tlsCertHash := enterprisepem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, rsCertsConfig.CertSecretName, databaseSecretPath, log) + internalClusterCertHash := enterprisepem.ReadHashFromSecret(ctx, reconciler.SecretClient, rs.Namespace, rsCertsConfig.InternalClusterSecretName, databaseSecretPath, log) rsConfig := construct.ReplicaSetOptions( PodEnvVars(newPodVars(conn, projectConfig, rs.Spec.LogLevel)), - CurrentAgentAuthMechanism(currentAgentAuthMode), + CurrentAgentAuthMechanism(deploymentOptions.currentAgentAuthMode), CertificateHash(tlsCertHash), - AgentCertHash(agentCertHash), + AgentCertHash(deploymentOptions.agentCertHash), InternalClusterHash(internalClusterCertHash), - PrometheusTLSCertHash(prometheusCertHash), + PrometheusTLSCertHash(deploymentOptions.prometheusCertHash), WithVaultConfig(vaultConfig), WithLabels(rs.Labels), WithAdditionalMongodConfig(rs.Spec.GetAdditionalMongodConfig()), - WithInitDatabaseNonStaticImage(images.ContainerImage(r.imageUrls, util.InitDatabaseImageUrlEnv, r.initDatabaseNonStaticImageVersion)), - WithDatabaseNonStaticImage(images.ContainerImage(r.imageUrls, util.NonStaticDatabaseEnterpriseImage, r.databaseNonStaticImageVersion)), - WithAgentImage(images.ContainerImage(r.imageUrls, architectures.MdbAgentImageRepo, automationAgentVersion)), - WithMongodbImage(images.GetOfficialImage(r.imageUrls, rs.Spec.Version, rs.GetAnnotations())), + WithInitDatabaseNonStaticImage(images.ContainerImage(reconciler.imageUrls, util.InitDatabaseImageUrlEnv, reconciler.initDatabaseNonStaticImageVersion)), + WithDatabaseNonStaticImage(images.ContainerImage(reconciler.imageUrls, util.NonStaticDatabaseEnterpriseImage, reconciler.databaseNonStaticImageVersion)), + WithAgentImage(images.ContainerImage(reconciler.imageUrls, architectures.MdbAgentImageRepo, automationAgentVersion)), + WithMongodbImage(images.GetOfficialImage(reconciler.imageUrls, rs.Spec.Version, rs.GetAnnotations())), ) return rsConfig, nil @@ -566,7 +689,10 @@ func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls // updateOmDeploymentRs performs OM registration operation for the replicaset. 
So the changes will be finally propagated // to automation agents in containers -func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, conn om.Connection, membersNumberBefore int, rs *mdbv1.MongoDB, log *zap.SugaredLogger, tlsCertPath, internalClusterCertPath string, deploymentOptionsRS deploymentOptionsRS, shouldMirrorKeyfileForMongot bool, isRecovering bool) workflow.Status { +func (r *ReplicaSetReconcilerHelper) updateOmDeploymentRs(ctx context.Context, conn om.Connection, membersNumberBefore int, tlsCertPath, internalClusterCertPath string, deploymentOptions deploymentOptionsRS, shouldMirrorKeyfileForMongot bool, isRecovering bool) workflow.Status { + rs := r.resource + log := r.log + reconciler := r.reconciler log.Debug("Entering UpdateOMDeployments") // Only "concrete" RS members should be observed // - if scaling down, let's observe only members that will remain after scale-down operation @@ -580,7 +706,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c caFilePath := fmt.Sprintf("%s/ca-pem", util.TLSCaMountPath) // If the current operation is to disable TLS, then we should lock the current members of the Replica Set, // that is, do not scale them up or down until TLS disabling has completed. - shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(conn, r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, membersNumberBefore, rs, log, caFilePath, tlsCertPath) + shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(conn, reconciler.imageUrls[mcoConstruct.MongodbImageEnv], reconciler.forceEnterprise, membersNumberBefore, rs, caFilePath, tlsCertPath, r.deploymentState.LastAchievedSpec, log) if err != nil && !isRecovering { return workflow.Failed(err) } @@ -603,15 +729,15 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c updatedMembers = replicasTarget } - replicaSet := replicaset.BuildFromMongoDBWithReplicas(r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, rs, updatedMembers, rs.CalculateFeatureCompatibilityVersion(), tlsCertPath) + replicaSet := replicaset.BuildFromMongoDBWithReplicas(reconciler.imageUrls[mcoConstruct.MongodbImageEnv], reconciler.forceEnterprise, rs, updatedMembers, rs.CalculateFeatureCompatibilityVersion(), tlsCertPath) processNames := replicaSet.GetProcessNames() - status, additionalReconciliationRequired := r.updateOmAuthentication(ctx, conn, processNames, rs, deploymentOptionsRS.agentCertPath, caFilePath, internalClusterCertPath, isRecovering, log) + status, additionalReconciliationRequired := reconciler.updateOmAuthentication(ctx, conn, processNames, rs, deploymentOptions.agentCertPath, caFilePath, internalClusterCertPath, isRecovering, log) if !status.IsOK() && !isRecovering { return status } - lastRsConfig, err := rs.GetLastAdditionalMongodConfigByType(mdbv1.ReplicaSetConfig) + lastRsConfig, err := mdbv1.GetLastAdditionalMongodConfigByType(r.deploymentState.LastAchievedSpec, mdbv1.ReplicaSetConfig) if err != nil && !isRecovering { return workflow.Failed(err) } @@ -619,15 +745,15 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c prometheusConfiguration := PrometheusConfiguration{ prometheus: rs.GetPrometheus(), conn: conn, - secretsClient: r.SecretClient, + secretsClient: reconciler.SecretClient, namespace: rs.GetNamespace(), - prometheusCertHash: deploymentOptionsRS.prometheusCertHash, + prometheusCertHash: deploymentOptions.prometheusCertHash, } err = conn.ReadUpdateDeployment( func(d 
om.Deployment) error { if shouldMirrorKeyfileForMongot { - if err := r.mirrorKeyfileIntoSecretForMongot(ctx, d, rs, log); err != nil { + if err := r.mirrorKeyfileIntoSecretForMongot(ctx, d); err != nil { return err } } @@ -660,7 +786,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c return workflow.Failed(err) } - if status := r.ensureBackupConfigurationAndUpdateStatus(ctx, conn, rs, r.SecretClient, log); !status.IsOK() && !isRecovering { + if status := reconciler.ensureBackupConfigurationAndUpdateStatus(ctx, conn, rs, reconciler.SecretClient, log); !status.IsOK() && !isRecovering { return status } @@ -671,7 +797,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c // updateOmDeploymentDisableTLSConfiguration checks if TLS configuration needs // to be disabled. In which case it will disable it and inform to the calling // function. -func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage string, forceEnterprise bool, membersNumberBefore int, rs *mdbv1.MongoDB, log *zap.SugaredLogger, caFilePath, tlsCertPath string) (bool, error) { +func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage string, forceEnterprise bool, membersNumberBefore int, rs *mdbv1.MongoDB, caFilePath, tlsCertPath string, lastSpec *mdbv1.MongoDbSpec, log *zap.SugaredLogger) (bool, error) { tlsConfigWasDisabled := false err := conn.ReadUpdateDeployment( @@ -687,7 +813,7 @@ func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage // there's a scale up change at the same time). replicaSet := replicaset.BuildFromMongoDBWithReplicas(mongoDBImage, forceEnterprise, rs, membersNumberBefore, rs.CalculateFeatureCompatibilityVersion(), tlsCertPath) - lastConfig, err := rs.GetLastAdditionalMongodConfigByType(mdbv1.ReplicaSetConfig) + lastConfig, err := mdbv1.GetLastAdditionalMongodConfigByType(lastSpec, mdbv1.ReplicaSetConfig) if err != nil { return err } @@ -702,19 +828,30 @@ func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage return tlsConfigWasDisabled, err } -func (r *ReconcileMongoDbReplicaSet) OnDelete(ctx context.Context, obj runtime.Object, log *zap.SugaredLogger) error { +func (r *ReplicaSetReconcilerHelper) OnDelete(ctx context.Context, obj runtime.Object, log *zap.SugaredLogger) error { rs := obj.(*mdbv1.MongoDB) - projectConfig, credsConfig, err := project.ReadConfigAndCredentials(ctx, r.client, r.SecretClient, rs, log) + if err := r.cleanOpsManagerState(ctx, rs, log); err != nil { + return err + } + + r.reconciler.resourceWatcher.RemoveDependentWatchedResources(rs.ObjectKey()) + + return nil +} + +func (r *ReplicaSetReconcilerHelper) cleanOpsManagerState(ctx context.Context, rs *mdbv1.MongoDB, log *zap.SugaredLogger) error { + projectConfig, credsConfig, err := project.ReadConfigAndCredentials(ctx, r.reconciler.client, r.reconciler.SecretClient, rs, log) if err != nil { return err } log.Infow("Removing replica set from Ops Manager", "config", rs.Spec) - conn, _, err := connection.PrepareOpsManagerConnection(ctx, r.SecretClient, projectConfig, credsConfig, r.omConnectionFactory, rs.Namespace, log) + conn, _, err := connection.PrepareOpsManagerConnection(ctx, r.reconciler.SecretClient, projectConfig, credsConfig, r.reconciler.omConnectionFactory, rs.Namespace, log) if err != nil { return err } + processNames := make([]string, 0) err = conn.ReadUpdateDeployment( func(d om.Deployment) error { @@ -743,6 +880,8 @@ func (r *ReconcileMongoDbReplicaSet) 
OnDelete(ctx context.Context, obj runtime.O } } + // During deletion, calculate the maximum number of hosts that could possibly exist to ensure complete cleanup. + // Reading from Status here is appropriate since this is outside the reconciliation loop. hostsToRemove, _ := dns.GetDNSNames(rs.Name, rs.ServiceName(), rs.Namespace, rs.Spec.GetClusterDomain(), util.MaxInt(rs.Status.Members, rs.Spec.Members), nil) log.Infow("Stop monitoring removed hosts in Ops Manager", "removedHosts", hostsToRemove) @@ -750,12 +889,10 @@ func (r *ReconcileMongoDbReplicaSet) OnDelete(ctx context.Context, obj runtime.O return err } - if err := r.clearProjectAuthenticationSettings(ctx, conn, rs, processNames, log); err != nil { + if err := r.reconciler.clearProjectAuthenticationSettings(ctx, conn, rs, processNames, log); err != nil { return err } - r.resourceWatcher.RemoveDependentWatchedResources(rs.ObjectKey()) - log.Infow("Clear feature control for group: %s", "groupID", conn.GroupID()) if result := controlledfeature.ClearFeatureControls(conn, conn.OpsManagerVersion(), log); !result.IsOK() { result.Log(log) @@ -766,13 +903,24 @@ func (r *ReconcileMongoDbReplicaSet) OnDelete(ctx context.Context, obj runtime.O return nil } +func (r *ReconcileMongoDbReplicaSet) OnDelete(ctx context.Context, obj runtime.Object, log *zap.SugaredLogger) error { + helper, err := r.newReconcilerHelper(ctx, obj.(*mdbv1.MongoDB), log) + if err != nil { + return err + } + return helper.OnDelete(ctx, obj, log) +} + func getAllHostsForReplicas(rs *mdbv1.MongoDB, membersCount int) []string { hostnames, _ := dns.GetDNSNames(rs.Name, rs.ServiceName(), rs.Namespace, rs.Spec.GetClusterDomain(), membersCount, rs.Spec.DbCommonSpec.GetExternalDomain()) return hostnames } -func (r *ReconcileMongoDbReplicaSet) applySearchOverrides(ctx context.Context, rs *mdbv1.MongoDB, log *zap.SugaredLogger) bool { - search := r.lookupCorrespondingSearchResource(ctx, rs, log) +func (r *ReplicaSetReconcilerHelper) applySearchOverrides(ctx context.Context) bool { + rs := r.resource + log := r.log + + search := r.lookupCorrespondingSearchResource(ctx) if search == nil { log.Debugf("No MongoDBSearch resource found, skipping search overrides") return false @@ -798,27 +946,34 @@ func (r *ReconcileMongoDbReplicaSet) applySearchOverrides(ctx context.Context, r return true } -func (r *ReconcileMongoDbReplicaSet) mirrorKeyfileIntoSecretForMongot(ctx context.Context, d om.Deployment, rs *mdbv1.MongoDB, log *zap.SugaredLogger) error { +func (r *ReplicaSetReconcilerHelper) mirrorKeyfileIntoSecretForMongot(ctx context.Context, d om.Deployment) error { + rs := r.resource + reconciler := r.reconciler + log := r.log + keyfileContents := maputil.ReadMapValueAsString(d, "auth", "key") keyfileSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-%s", rs.Name, searchcontroller.MongotKeyfileFilename), Namespace: rs.Namespace}} log.Infof("Mirroring the replicaset %s's keyfile into the secret %s", rs.ObjectKey(), kube.ObjectKeyFromApiObject(keyfileSecret)) - _, err := controllerutil.CreateOrUpdate(ctx, r.client, keyfileSecret, func() error { + _, err := controllerutil.CreateOrUpdate(ctx, reconciler.client, keyfileSecret, func() error { keyfileSecret.StringData = map[string]string{searchcontroller.MongotKeyfileFilename: keyfileContents} - return controllerutil.SetOwnerReference(rs, keyfileSecret, r.client.Scheme()) + return controllerutil.SetOwnerReference(rs, keyfileSecret, reconciler.client.Scheme()) }) if err != nil { return xerrors.Errorf("Failed to mirror the 
replicaset's keyfile into a secret: %w", err) - } else { - return nil } + return nil } -func (r *ReconcileMongoDbReplicaSet) lookupCorrespondingSearchResource(ctx context.Context, rs *mdbv1.MongoDB, log *zap.SugaredLogger) *searchv1.MongoDBSearch { +func (r *ReplicaSetReconcilerHelper) lookupCorrespondingSearchResource(ctx context.Context) *searchv1.MongoDBSearch { + rs := r.resource + reconciler := r.reconciler + log := r.log + var search *searchv1.MongoDBSearch searchList := &searchv1.MongoDBSearchList{} - if err := r.client.List(ctx, searchList, &client.ListOptions{ + if err := reconciler.client.List(ctx, searchList, &client.ListOptions{ FieldSelector: fields.OneTermEqualSelector(searchcontroller.MongoDBSearchIndexFieldName, rs.GetNamespace()+"/"+rs.GetName()), }); err != nil { log.Debugf("Failed to list MongoDBSearch resources: %v", err) diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index 787883ffb..dfda4efc9 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -398,22 +398,22 @@ func TestUpdateDeploymentTLSConfiguration(t *testing.T) { deploymentNoTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rsNoTLS) // TLS Disabled -> TLS Disabled - shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsNoTLS, zap.S(), util.CAFilePathInContainer, "") + shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsNoTLS, util.CAFilePathInContainer, "", nil, zap.S()) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Disabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsWithTLS, zap.S(), util.CAFilePathInContainer, "") + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsWithTLS, util.CAFilePathInContainer, "", nil, zap.S()) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsWithTLS, zap.S(), util.CAFilePathInContainer, "") + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsWithTLS, util.CAFilePathInContainer, "", nil, zap.S()) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Disabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsNoTLS, zap.S(), util.CAFilePathInContainer, "") + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsNoTLS, util.CAFilePathInContainer, "", nil, zap.S()) assert.NoError(t, err) assert.True(t, shouldLockMembers) }
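A note on the deployment state introduced in this patch: `ReplicaSetDeploymentState` is persisted through resource annotations (via `getAnnotationsForResource`) and read back on the next reconcile through `GetLastSpec`, while `MemberCountBefore` is snapshotted from `Status` at initialization. The sketch below illustrates that round-trip under simplified assumptions; the annotation key, the `Spec` stand-in type, and both helper functions here are hypothetical reductions of the operator's real API, not its actual implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Spec is a stand-in for mdbv1.MongoDbSpec; only Members matters here.
type Spec struct {
	Members int `json:"members"`
}

// ReplicaSetDeploymentState mirrors the struct added in this patch.
type ReplicaSetDeploymentState struct {
	LastAchievedSpec  *Spec `json:"lastAchievedSpec"`
	MemberCountBefore int   `json:"memberCountBefore"`
}

// stateAnnotation is a hypothetical key; the operator derives its real
// annotations via getAnnotationsForResource.
const stateAnnotation = "mongodb.com/v1.lastAchievedSpec"

// writeState serializes only the last achieved spec into the annotation map.
// The member count is deliberately not persisted, matching the patch, where
// it is re-read from Status at the start of each reconcile.
func writeState(ann map[string]string, s ReplicaSetDeploymentState) error {
	raw, err := json.Marshal(s.LastAchievedSpec)
	if err != nil {
		return err
	}
	ann[stateAnnotation] = string(raw)
	return nil
}

// readState rebuilds the state on the next reconcile. A missing annotation
// leaves LastAchievedSpec nil, the first-reconcile case that downstream
// helpers such as GetLastAdditionalMongodConfigByType must tolerate.
func readState(ann map[string]string, statusMembers int) (ReplicaSetDeploymentState, error) {
	state := ReplicaSetDeploymentState{MemberCountBefore: statusMembers}
	raw, ok := ann[stateAnnotation]
	if !ok {
		return state, nil
	}
	if err := json.Unmarshal([]byte(raw), &state.LastAchievedSpec); err != nil {
		return state, err
	}
	return state, nil
}

func main() {
	ann := map[string]string{}
	_ = writeState(ann, ReplicaSetDeploymentState{LastAchievedSpec: &Spec{Members: 3}})
	state, _ := readState(ann, 3)
	fmt.Printf("lastAchievedSpec=%+v memberCountBefore=%d\n", state.LastAchievedSpec, state.MemberCountBefore)
}
```

Keeping the member count outside the serialized payload mirrors the comment in `readState` about eventually moving this into ConfigMap-backed state: the snapshot gives a stable view for the whole reconcile without yet changing where it is stored.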
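The WARNING added on `ReconcileMongoDbReplicaSet` and the split into `ReplicaSetReconcilerHelper` follow a standard controller-runtime constraint: a single reconciler instance serves all reconcile calls, potentially concurrently, so per-run state must live in a per-invocation object. A minimal sketch of the pattern, with stand-in types rather than the operator's real ones:

```go
package main

import (
	"context"
	"fmt"
)

// Reconciler plays the role of ReconcileMongoDbReplicaSet: controller-runtime
// builds ONE instance and may call Reconcile concurrently, so only immutable
// dependencies (clients, factories, image URLs) belong here.
type Reconciler struct{}

// runHelper plays the role of ReplicaSetReconcilerHelper: one instance per
// Reconcile call, so per-run state such as the deployment state is safe here.
type runHelper struct {
	r        *Reconciler
	resource string // stand-in for *mdbv1.MongoDB
	state    int    // stand-in for *ReplicaSetDeploymentState
}

// newHelper mirrors newReconcilerHelper: construct, then initialize state.
func (r *Reconciler) newHelper(ctx context.Context, resource string) (*runHelper, error) {
	h := &runHelper{r: r, resource: resource}
	h.state = len(resource) // placeholder for readState()
	return h, nil
}

func (h *runHelper) Reconcile(ctx context.Context) error {
	fmt.Printf("reconciling %s with state %d\n", h.resource, h.state)
	return nil
}

// Reconcile stays a thin entry point: fetch the resource, build a helper,
// delegate. Nothing is ever written back onto r.
func (r *Reconciler) Reconcile(ctx context.Context, name string) error {
	h, err := r.newHelper(ctx, name)
	if err != nil {
		return err
	}
	return h.Reconcile(ctx)
}

func main() {
	r := &Reconciler{}
	_ = r.Reconcile(context.Background(), "my-replica-set")
}
```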
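The move from the method `rs.GetLastAdditionalMongodConfigByType(...)` to the package-level `mdb.GetLastAdditionalMongodConfigByType(lastSpec, ...)` makes the data dependency explicit and lets callers such as `CreateFromReplicaSet` pass `nil` when no previous spec exists. A rough sketch of the nil-tolerant shape; the `configType` selector is dropped, the types are reduced, and returning an empty config for a nil spec is an assumption inferred from the `nil` call site, not confirmed behavior:

```go
package main

import "fmt"

// AdditionalMongodConfig and MongoDbSpec are reduced stand-ins; the real
// function also takes an AdditionalMongodConfigType selector, omitted here.
type AdditionalMongodConfig struct {
	NetPort int
}

type MongoDbSpec struct {
	ReplicaSetConfig *AdditionalMongodConfig
}

// GetLastAdditionalMongodConfigByType in free-function form: the caller
// passes lastSpec explicitly (from the deployment state), and nil degrades
// to an empty config instead of relying on resource-attached state.
func GetLastAdditionalMongodConfigByType(lastSpec *MongoDbSpec) (*AdditionalMongodConfig, error) {
	if lastSpec == nil || lastSpec.ReplicaSetConfig == nil {
		return &AdditionalMongodConfig{}, nil
	}
	return lastSpec.ReplicaSetConfig, nil
}

func main() {
	cfg, _ := GetLastAdditionalMongodConfigByType(nil)
	fmt.Printf("first reconcile: %+v\n", cfg)

	last := &MongoDbSpec{ReplicaSetConfig: &AdditionalMongodConfig{NetPort: 27018}}
	cfg, _ = GetLastAdditionalMongodConfigByType(last)
	fmt.Printf("subsequent reconcile: %+v\n", cfg)
}
```

This is also why the method form now panics for ReplicaSet and ShardedCluster: those controllers keep their last spec in the deployment state, so reading it off the resource would silently use stale or missing data.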
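Finally, the helper's `updateStatus` wrapper guarantees that the deployment state is written on every exit path, not only at the end of a successful run, and that vault annotations ride along only when `status.IsOK()`. A compact model of that control flow, using function values in place of the common controller and the Kubernetes client; this is a sketch of the ordering, not the operator's actual code:

```go
package main

import "fmt"

// Status is a minimal stand-in for workflow.Status.
type Status struct{ ok bool }

func (s Status) IsOK() bool { return s.ok }

// updateStatus mirrors the helper's wrapper: first perform the normal status
// update, then persist the deployment state; vault annotations are included
// only when the reconcile actually succeeded.
func updateStatus(status Status, writeStatus func(Status) error, writeState func(includeVault bool) error) error {
	if err := writeStatus(status); err != nil {
		return err
	}
	if err := writeState(status.IsOK()); err != nil {
		// As in the patch: a failed state write is surfaced as a failed status.
		return writeStatus(Status{ok: false})
	}
	return nil
}

func main() {
	writeStatus := func(s Status) error {
		fmt.Printf("status updated, ok=%v\n", s.IsOK())
		return nil
	}
	writeState := func(includeVault bool) error {
		fmt.Printf("deployment state written, vaultAnnotations=%v\n", includeVault)
		return nil
	}

	// Early return with a pending status: state is still persisted,
	// but without vault annotations.
	_ = updateStatus(Status{ok: false}, writeStatus, writeState)

	// Successful reconcile: state and vault annotations are both persisted.
	_ = updateStatus(Status{ok: true}, writeStatus, writeState)
}
```

This is why the doc comment insists the wrapper is called exactly once per reconcile and its result returned: each call both updates status and flushes state, so calling it twice would double-write annotations and could mask an earlier failure.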