diff --git a/README.md b/README.md index 257043e5be..135cd808ff 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ The full documentation for the Operator can be found [here](https://docs.atlas.m kubectl apply -f https://raw.githubusercontent.com/mongodb/mongodb-atlas-kubernetes/main/deploy/all-in-one.yaml ``` -### Step 2. Create Atlas Cluster +### Step 2. Create Atlas Deployment **1.** Create an Atlas API Key Secret @@ -39,7 +39,7 @@ kubectl label secret mongodb-atlas-operator-api-key atlas.mongodb.com/type=crede **2.** Create an `AtlasProject` Custom Resource The `AtlasProject` CustomResource represents Atlas Projects in our Kubernetes cluster. You need to specify -`projectIpAccessList` with the IP addresses or CIDR blocks of any hosts that will connect to the Atlas Cluster. +`projectIpAccessList` with the IP addresses or CIDR blocks of any hosts that will connect to the Atlas Deployment. ``` cat < 0 { - // filtering the scope clusters by the ones existing in Atlas - for _, c := range scopeClusters { - for _, a := range allClustersInProject { - if a == c { - clustersToCheck = append(clustersToCheck, c) +func filterScopeDeployments(user mdbv1.AtlasDatabaseUser, allDeploymentsInProject []string) []string { + scopeDeployments := user.GetScopes(mdbv1.DeploymentScopeType) + var deploymentsToCheck []string + if len(scopeDeployments) > 0 { + // filtering the scope deployments by the ones existing in Atlas + for _, scopeDep := range scopeDeployments { + for _, projectDep := range allDeploymentsInProject { + if projectDep == scopeDep { + deploymentsToCheck = append(deploymentsToCheck, scopeDep) break } } } } - return clustersToCheck + return deploymentsToCheck } func shouldUpdate(log *zap.SugaredLogger, atlasSpec *mongodbatlas.DatabaseUser, operatorDBUser mdbv1.AtlasDatabaseUser, currentPasswordResourceVersion string) (bool, error) { @@ -235,7 +235,7 @@ func shouldUpdate(log *zap.SugaredLogger, atlasSpec *mongodbatlas.DatabaseUser, return passwordsChanged, nil } -// TODO move to a separate utils (reuse from clusters) +// TODO move to a separate utils (reuse from deployments) func userMatchesSpec(log *zap.SugaredLogger, atlasSpec *mongodbatlas.DatabaseUser, operatorSpec mdbv1.AtlasDatabaseUserSpec) (bool, error) { userMerged := mongodbatlas.DatabaseUser{} if err := compat.JSONCopy(&userMerged, atlasSpec); err != nil { diff --git a/pkg/controller/atlasdatabaseuser/databaseuser_test.go b/pkg/controller/atlasdatabaseuser/databaseuser_test.go index a8c78b04b6..18a779338f 100644 --- a/pkg/controller/atlasdatabaseuser/databaseuser_test.go +++ b/pkg/controller/atlasdatabaseuser/databaseuser_test.go @@ -34,13 +34,13 @@ func TestFilterScopeClusters(t *testing.T) { Type: mdbv1.DataLakeScopeType, }, { Name: "cluster1", - Type: mdbv1.ClusterScopeType, + Type: mdbv1.DeploymentScopeType, }, { Name: "cluster2", - Type: mdbv1.ClusterScopeType, + Type: mdbv1.DeploymentScopeType, }} clusters := []string{"cluster1", "cluster4", "cluster5"} - scopeClusters := filterScopeClusters(mdbv1.AtlasDatabaseUser{Spec: mdbv1.AtlasDatabaseUserSpec{Scopes: scopeSpecs}}, clusters) + scopeClusters := filterScopeDeployments(mdbv1.AtlasDatabaseUser{Spec: mdbv1.AtlasDatabaseUserSpec{Scopes: scopeSpecs}}, clusters) assert.Equal(t, []string{"cluster1"}, scopeClusters) } diff --git a/pkg/controller/atlasdeployment/advanced_deployment.go b/pkg/controller/atlasdeployment/advanced_deployment.go index 7e8f985880..84502fdd49 100644 --- a/pkg/controller/atlasdeployment/advanced_deployment.go +++ 
b/pkg/controller/atlasdeployment/advanced_deployment.go @@ -15,94 +15,94 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/compat" ) -func (r *AtlasDeploymentReconciler) ensureAdvancedDeploymentState(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment) (*mongodbatlas.AdvancedCluster, workflow.Result) { - advancedClusterSpec := cluster.Spec.AdvancedDeploymentSpec +func (r *AtlasDeploymentReconciler) ensureAdvancedDeploymentState(ctx *workflow.Context, project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment) (*mongodbatlas.AdvancedCluster, workflow.Result) { + advancedDeploymentSpec := deployment.Spec.AdvancedDeploymentSpec - advancedCluster, resp, err := ctx.Client.AdvancedClusters.Get(context.Background(), project.Status.ID, advancedClusterSpec.Name) + advancedDeployment, resp, err := ctx.Client.AdvancedClusters.Get(context.Background(), project.Status.ID, advancedDeploymentSpec.Name) if err != nil { if resp == nil { - return advancedCluster, workflow.Terminate(workflow.Internal, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.Internal, err.Error()) } if resp.StatusCode != http.StatusNotFound { - return advancedCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } - advancedCluster, err = advancedClusterSpec.AdvancedDeployment() + advancedDeployment, err = advancedDeploymentSpec.AdvancedDeployment() if err != nil { - return advancedCluster, workflow.Terminate(workflow.Internal, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.Internal, err.Error()) } - ctx.Log.Infof("Advanced Cluster %s doesn't exist in Atlas - creating", advancedClusterSpec.Name) - advancedCluster, _, err = ctx.Client.AdvancedClusters.Create(context.Background(), project.Status.ID, advancedCluster) + ctx.Log.Infof("Advanced Deployment %s doesn't exist in Atlas - creating", advancedDeploymentSpec.Name) + advancedDeployment, _, err = ctx.Client.AdvancedClusters.Create(context.Background(), project.Status.ID, advancedDeployment) if err != nil { - return advancedCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } } - switch advancedCluster.StateName { + switch advancedDeployment.StateName { case "IDLE": - return advancedClusterIdle(ctx, project, cluster, advancedCluster) + return advancedDeploymentIdle(ctx, project, deployment, advancedDeployment) case "CREATING": - return advancedCluster, workflow.InProgress(workflow.ClusterCreating, "cluster is provisioning") + return advancedDeployment, workflow.InProgress(workflow.DeploymentCreating, "deployment is provisioning") case "UPDATING", "REPAIRING": - return advancedCluster, workflow.InProgress(workflow.ClusterUpdating, "cluster is updating") + return advancedDeployment, workflow.InProgress(workflow.DeploymentUpdating, "deployment is updating") // TODO: add "DELETING", "DELETED", handle 404 on delete default: - return advancedCluster, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown cluster state %q", advancedCluster.StateName)) + return advancedDeployment, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown deployment state %q", advancedDeployment.StateName)) } } -func advancedClusterIdle(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, advancedCluster *mongodbatlas.AdvancedCluster) 
(*mongodbatlas.AdvancedCluster, workflow.Result) { - resultingCluster, err := MergedAdvancedDeployment(*advancedCluster, cluster.Spec) +func advancedDeploymentIdle(ctx *workflow.Context, project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment, advancedDeployment *mongodbatlas.AdvancedCluster) (*mongodbatlas.AdvancedCluster, workflow.Result) { + resultingDeployment, err := MergedAdvancedDeployment(*advancedDeployment, deployment.Spec) if err != nil { - return advancedCluster, workflow.Terminate(workflow.Internal, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.Internal, err.Error()) } - if done := AdvancedDeploymentsEqual(ctx.Log, *advancedCluster, resultingCluster); done { - return advancedCluster, workflow.OK() + if done := AdvancedDeploymentsEqual(ctx.Log, *advancedDeployment, resultingDeployment); done { + return advancedDeployment, workflow.OK() } - if cluster.Spec.AdvancedDeploymentSpec.Paused != nil { - if advancedCluster.Paused == nil || *advancedCluster.Paused != *cluster.Spec.AdvancedDeploymentSpec.Paused { + if deployment.Spec.AdvancedDeploymentSpec.Paused != nil { + if advancedDeployment.Paused == nil || *advancedDeployment.Paused != *deployment.Spec.AdvancedDeploymentSpec.Paused { // paused is different from Atlas // we need to first send a special (un)pause request before reconciling everything else - resultingCluster = mongodbatlas.AdvancedCluster{ - Paused: cluster.Spec.AdvancedDeploymentSpec.Paused, + resultingDeployment = mongodbatlas.AdvancedCluster{ + Paused: deployment.Spec.AdvancedDeploymentSpec.Paused, } } else { // otherwise, don't send the paused field - resultingCluster.Paused = nil + resultingDeployment.Paused = nil } } - resultingCluster = cleanupAdvancedDeployment(resultingCluster) + resultingDeployment = cleanupAdvancedDeployment(resultingDeployment) - advancedCluster, _, err = ctx.Client.AdvancedClusters.Update(context.Background(), project.Status.ID, cluster.Spec.AdvancedDeploymentSpec.Name, &resultingCluster) + advancedDeployment, _, err = ctx.Client.AdvancedClusters.Update(context.Background(), project.Status.ID, deployment.Spec.AdvancedDeploymentSpec.Name, &resultingDeployment) if err != nil { - return advancedCluster, workflow.Terminate(workflow.ClusterNotUpdatedInAtlas, err.Error()) + return advancedDeployment, workflow.Terminate(workflow.DeploymentNotUpdatedInAtlas, err.Error()) } - return nil, workflow.InProgress(workflow.ClusterUpdating, "cluster is updating") + return nil, workflow.InProgress(workflow.DeploymentUpdating, "deployment is updating") } -func cleanupAdvancedDeployment(cluster mongodbatlas.AdvancedCluster) mongodbatlas.AdvancedCluster { - cluster.ID = "" - cluster.MongoDBVersion = "" - cluster.StateName = "" - cluster.ConnectionStrings = nil - return cluster +func cleanupAdvancedDeployment(deployment mongodbatlas.AdvancedCluster) mongodbatlas.AdvancedCluster { + deployment.ID = "" + deployment.MongoDBVersion = "" + deployment.StateName = "" + deployment.ConnectionStrings = nil + return deployment } -// MergedAdvancedDeployment will return the result of merging AtlasDeploymentSpec with Atlas Advanced Cluster -func MergedAdvancedDeployment(advancedCluster mongodbatlas.AdvancedCluster, spec mdbv1.AtlasDeploymentSpec) (mongodbatlas.AdvancedCluster, error) { +// MergedAdvancedDeployment will return the result of merging AtlasDeploymentSpec with Atlas Advanced Deployment +func MergedAdvancedDeployment(advancedDeployment mongodbatlas.AdvancedCluster, spec mdbv1.AtlasDeploymentSpec) (mongodbatlas.AdvancedCluster, 
error) { result := mongodbatlas.AdvancedCluster{} - if err := compat.JSONCopy(&result, advancedCluster); err != nil { + if err := compat.JSONCopy(&result, advancedDeployment); err != nil { return result, err } @@ -110,7 +110,7 @@ func MergedAdvancedDeployment(advancedCluster mongodbatlas.AdvancedCluster, spec return result, err } - for i, replicationSpec := range advancedCluster.ReplicationSpecs { + for i, replicationSpec := range advancedDeployment.ReplicationSpecs { for k, v := range replicationSpec.RegionConfigs { // the response does not return backing provider names in some situations. // if this is the case, we want to strip these fields so they do not cause a bad comparison. @@ -122,48 +122,48 @@ func MergedAdvancedDeployment(advancedCluster mongodbatlas.AdvancedCluster, spec return result, nil } -// AdvancedDeploymentsEqual compares two Atlas Advanced Clusters -func AdvancedDeploymentsEqual(log *zap.SugaredLogger, clusterAtlas mongodbatlas.AdvancedCluster, clusterOperator mongodbatlas.AdvancedCluster) bool { - d := cmp.Diff(clusterAtlas, clusterOperator, cmpopts.EquateEmpty()) +// AdvancedDeploymentsEqual compares two Atlas Advanced Deployments +func AdvancedDeploymentsEqual(log *zap.SugaredLogger, deploymentAtlas mongodbatlas.AdvancedCluster, deploymentOperator mongodbatlas.AdvancedCluster) bool { + d := cmp.Diff(deploymentAtlas, deploymentOperator, cmpopts.EquateEmpty()) if d != "" { - log.Debugf("Clusters are different: %s", d) + log.Debugf("Deployments are different: %s", d) } return d == "" } -// GetAllClusterNames returns all cluster names including regular and advanced clusters. -func GetAllClusterNames(client mongodbatlas.Client, projectID string) ([]string, error) { - var clusterNames []string - clusters, _, err := client.Clusters.List(context.Background(), projectID, &mongodbatlas.ListOptions{}) +// GetAllDeploymentNames returns all deployment names including regular and advanced deployments. +func GetAllDeploymentNames(client mongodbatlas.Client, projectID string) ([]string, error) { + var deploymentNames []string + deployments, _, err := client.Clusters.List(context.Background(), projectID, &mongodbatlas.ListOptions{}) if err != nil { return nil, err } - advancedClusters, _, err := client.AdvancedClusters.List(context.Background(), projectID, &mongodbatlas.ListOptions{}) + advancedDeployments, _, err := client.AdvancedClusters.List(context.Background(), projectID, &mongodbatlas.ListOptions{}) if err != nil { return nil, err } - for _, c := range clusters { - clusterNames = append(clusterNames, c.Name) + for _, c := range deployments { + deploymentNames = append(deploymentNames, c.Name) } - for _, c := range advancedClusters.Results { - // based on configuration settings, some advanced clusters also show up in the regular clusters API. - // For these clusters, we don't want to duplicate the secret so we skip them. + for _, d := range advancedDeployments.Results { + // based on configuration settings, some advanced deployments also show up in the regular deployments API. + // For these deployments, we don't want to duplicate the secret so we skip them. found := false - for _, regularCluster := range clusters { - if regularCluster.Name == c.Name { + for _, regularDeployment := range deployments { + if regularDeployment.Name == d.Name { found = true break } } - // we only include cluster names which have not been handled by the regular cluster API. + // we only include deployment names which have not been handled by the regular deployment API. 
if !found { - clusterNames = append(clusterNames, c.Name) + deploymentNames = append(deploymentNames, d.Name) } } - return clusterNames, nil + return deploymentNames, nil } diff --git a/pkg/controller/atlasdeployment/atlasdeployment_controller.go b/pkg/controller/atlasdeployment/atlasdeployment_controller.go index e512e4cb52..340583b267 100644 --- a/pkg/controller/atlasdeployment/atlasdeployment_controller.go +++ b/pkg/controller/atlasdeployment/atlasdeployment_controller.go @@ -87,22 +87,22 @@ type AtlasDeploymentReconciler struct { func (r *AtlasDeploymentReconciler) Reconcile(context context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.With("atlasdeployment", req.NamespacedName) - cluster := &mdbv1.AtlasDeployment{} - result := customresource.PrepareResource(r.Client, req, cluster, log) + deployment := &mdbv1.AtlasDeployment{} + result := customresource.PrepareResource(r.Client, req, deployment, log) if !result.IsOk() { return result.ReconcileResult(), nil } - if shouldSkip := customresource.ReconciliationShouldBeSkipped(cluster); shouldSkip { - log.Infow(fmt.Sprintf("-> Skipping AtlasDeployment reconciliation as annotation %s=%s", customresource.ReconciliationPolicyAnnotation, customresource.ReconciliationPolicySkip), "spec", cluster.Spec) + if shouldSkip := customresource.ReconciliationShouldBeSkipped(deployment); shouldSkip { + log.Infow(fmt.Sprintf("-> Skipping AtlasDeployment reconciliation as annotation %s=%s", customresource.ReconciliationPolicyAnnotation, customresource.ReconciliationPolicySkip), "spec", deployment.Spec) return workflow.OK().ReconcileResult(), nil } - ctx := customresource.MarkReconciliationStarted(r.Client, cluster, log) - log.Infow("-> Starting AtlasDeployment reconciliation", "spec", cluster.Spec, "status", cluster.Status) - defer statushandler.Update(ctx, r.Client, r.EventRecorder, cluster) + ctx := customresource.MarkReconciliationStarted(r.Client, deployment, log) + log.Infow("-> Starting AtlasDeployment reconciliation", "spec", deployment.Spec, "status", deployment.Status) + defer statushandler.Update(ctx, r.Client, r.EventRecorder, deployment) - if err := validate.ClusterSpec(cluster.Spec); err != nil { + if err := validate.DeploymentSpec(deployment.Spec); err != nil { result := workflow.Terminate(workflow.Internal, err.Error()) ctx.SetConditionFromResult(status.ValidationSucceeded, result) return result.ReconcileResult(), nil @@ -110,15 +110,15 @@ func (r *AtlasDeploymentReconciler) Reconcile(context context.Context, req ctrl. ctx.SetConditionTrue(status.ValidationSucceeded) project := &mdbv1.AtlasProject{} - if result := r.readProjectResource(cluster, project); !result.IsOk() { - ctx.SetConditionFromResult(status.ClusterReadyType, result) + if result := r.readProjectResource(deployment, project); !result.IsOk() { + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result.ReconcileResult(), nil } connection, err := atlas.ReadConnection(log, r.Client, r.GlobalAPISecret, project.ConnectionSecretObjectKey()) if err != nil { result := workflow.Terminate(workflow.AtlasCredentialsNotProvided, err.Error()) - ctx.SetConditionFromResult(status.ClusterReadyType, result) + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result.ReconcileResult(), nil } ctx.Connection = connection @@ -126,23 +126,23 @@ func (r *AtlasDeploymentReconciler) Reconcile(context context.Context, req ctrl. 
atlasClient, err := atlas.Client(r.AtlasDomain, connection, log) if err != nil { result := workflow.Terminate(workflow.Internal, err.Error()) - ctx.SetConditionFromResult(status.ClusterReadyType, result) + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result.ReconcileResult(), nil } ctx.Client = atlasClient // Allow users to specify M0/M2/M5 clusters without providing TENANT for Normal and Serverless clusters - r.verifyNonTenantCase(cluster) + r.verifyNonTenantCase(deployment) - handleCluster := r.selectClusterHandler(cluster) - if result, _ := handleCluster(ctx, project, cluster, req); !result.IsOk() { - ctx.SetConditionFromResult(status.ClusterReadyType, result) + handleDeployment := r.selectDeploymentHandler(deployment) + if result, _ := handleDeployment(ctx, project, deployment, req); !result.IsOk() { + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result.ReconcileResult(), nil } - if !cluster.IsServerless() { - if result := r.handleAdvancedOptions(ctx, project, cluster); !result.IsOk() { - ctx.SetConditionFromResult(status.ClusterReadyType, result) + if !deployment.IsServerless() { + if result := r.handleAdvancedOptions(ctx, project, deployment); !result.IsOk() { + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result.ReconcileResult(), nil } } @@ -190,34 +190,34 @@ func modifyProviderSettings(pSettings *mdbv1.ProviderSettingsSpec, clusterType s } } -func (r *AtlasDeploymentReconciler) selectClusterHandler(cluster *mdbv1.AtlasDeployment) clusterHandlerFunc { +func (r *AtlasDeploymentReconciler) selectDeploymentHandler(cluster *mdbv1.AtlasDeployment) clusterHandlerFunc { if cluster.IsAdvancedDeployment() { return r.handleAdvancedDeployment } if cluster.IsServerless() { return r.handleServerlessInstance } - return r.handleRegularCluster + return r.handleRegularDeployment } -func (r *AtlasDeploymentReconciler) handleClusterBackupSchedule(ctx *workflow.Context, c *mdbv1.AtlasDeployment, projectID, cName string, backupEnabled bool, req ctrl.Request) error { - if c.Spec.BackupScheduleRef.Name == "" && c.Spec.BackupScheduleRef.Namespace == "" { +func (r *AtlasDeploymentReconciler) handleDeploymentBackupSchedule(ctx *workflow.Context, deployment *mdbv1.AtlasDeployment, projectID, cName string, backupEnabled bool, req ctrl.Request) error { + if deployment.Spec.BackupScheduleRef.Name == "" && deployment.Spec.BackupScheduleRef.Namespace == "" { r.Log.Debug("no backup schedule configured for the cluster") return nil } if !backupEnabled { - return fmt.Errorf("can not proceed with backup schedule. Backups are not enabled for cluster %v", c.ClusterName) + return fmt.Errorf("can not proceed with backup schedule. Backups are not enabled for cluster %v", deployment.ClusterName) } resourcesToWatch := []watch.WatchedObject{} // Process backup schedule bSchedule := &mdbv1.AtlasBackupSchedule{} - bKey := types.NamespacedName{Namespace: c.Spec.BackupScheduleRef.Namespace, Name: c.Spec.BackupScheduleRef.Name} + bKey := types.NamespacedName{Namespace: deployment.Spec.BackupScheduleRef.Namespace, Name: deployment.Spec.BackupScheduleRef.Name} err := r.Client.Get(context.Background(), bKey, bSchedule) if err != nil { - return fmt.Errorf("%v backupschedule resource is not found. e: %w", c.Spec.BackupScheduleRef, err) + return fmt.Errorf("%v backupschedule resource is not found. 
e: %w", deployment.Spec.BackupScheduleRef, err) } resourcesToWatch = append(resourcesToWatch, watch.WatchedObject{ResourceKind: bSchedule.Kind, Resource: bKey}) @@ -277,7 +277,7 @@ func (r *AtlasDeploymentReconciler) handleClusterBackupSchedule(ctx *workflow.Co return nil } -// handleAdvancedDeployment ensures the state of the cluster using the Advanced Cluster API +// handleAdvancedDeployment ensures the state of the cluster using the Advanced Deployment API func (r *AtlasDeploymentReconciler) handleAdvancedDeployment(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, req reconcile.Request) (workflow.Result, error) { c, result := r.ensureAdvancedDeploymentState(ctx, project, cluster) if c != nil && c.StateName != "" { @@ -288,9 +288,9 @@ func (r *AtlasDeploymentReconciler) handleAdvancedDeployment(ctx *workflow.Conte return result, nil } - if err := r.handleClusterBackupSchedule(ctx, cluster, project.ID(), c.Name, *c.BackupEnabled, req); err != nil { + if err := r.handleDeploymentBackupSchedule(ctx, cluster, project.ID(), c.Name, *c.BackupEnabled, req); err != nil { result := workflow.Terminate(workflow.Internal, err.Error()) - ctx.SetConditionFromResult(status.ClusterReadyType, result) + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result, nil } @@ -299,7 +299,7 @@ func (r *AtlasDeploymentReconciler) handleAdvancedDeployment(ctx *workflow.Conte } ctx. - SetConditionTrue(status.ClusterReadyType). + SetConditionTrue(status.DeploymentReadyType). EnsureStatusOption(status.AtlasDeploymentMongoDBVersionOption(c.MongoDBVersion)). EnsureStatusOption(status.AtlasDeploymentConnectionStringsOption(c.ConnectionStrings)) @@ -313,9 +313,9 @@ func (r *AtlasDeploymentReconciler) handleServerlessInstance(ctx *workflow.Conte return r.ensureConnectionSecretsAndSetStatusOptions(ctx, project, cluster, result, c) } -// handleRegularCluster ensures the state of the cluster using the Regular Cluster API -func (r *AtlasDeploymentReconciler) handleRegularCluster(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, req reconcile.Request) (workflow.Result, error) { - c, result := ensureClusterState(ctx, project, cluster) +// handleRegularDeployment ensures the state of the cluster using the Regular Deployment API +func (r *AtlasDeploymentReconciler) handleRegularDeployment(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, req reconcile.Request) (workflow.Result, error) { + c, result := ensureDeploymentState(ctx, project, cluster) if c != nil && c.StateName != "" { ctx.EnsureStatusOption(status.AtlasDeploymentStateNameOption(c.StateName)) } @@ -324,9 +324,9 @@ func (r *AtlasDeploymentReconciler) handleRegularCluster(ctx *workflow.Context, return result, nil } - if err := r.handleClusterBackupSchedule(ctx, cluster, project.ID(), c.Name, *c.ProviderBackupEnabled || *c.BackupEnabled, req); err != nil { + if err := r.handleDeploymentBackupSchedule(ctx, cluster, project.ID(), c.Name, *c.ProviderBackupEnabled || *c.BackupEnabled, req); err != nil { result := workflow.Terminate(workflow.Internal, err.Error()) - ctx.SetConditionFromResult(status.ClusterReadyType, result) + ctx.SetConditionFromResult(status.DeploymentReadyType, result) return result, nil } return r.ensureConnectionSecretsAndSetStatusOptions(ctx, project, cluster, result, c) @@ -334,49 +334,49 @@ func (r *AtlasDeploymentReconciler) handleRegularCluster(ctx *workflow.Context, // ensureConnectionSecretsAndSetStatusOptions creates the 
relevant connection secrets and sets // status options to the given context. This function can be used for regular clusters and serverless instances -func (r *AtlasDeploymentReconciler) ensureConnectionSecretsAndSetStatusOptions(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, result workflow.Result, c *mongodbatlas.Cluster) (workflow.Result, error) { - if c != nil && c.StateName != "" { - ctx.EnsureStatusOption(status.AtlasDeploymentStateNameOption(c.StateName)) +func (r *AtlasDeploymentReconciler) ensureConnectionSecretsAndSetStatusOptions(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, result workflow.Result, deployment *mongodbatlas.Cluster) (workflow.Result, error) { + if deployment != nil && deployment.StateName != "" { + ctx.EnsureStatusOption(status.AtlasDeploymentStateNameOption(deployment.StateName)) } if !result.IsOk() { return result, nil } - if csResult := r.ensureConnectionSecrets(ctx, project, c.Name, c.ConnectionStrings, cluster); !csResult.IsOk() { + if csResult := r.ensureConnectionSecrets(ctx, project, deployment.Name, deployment.ConnectionStrings, cluster); !csResult.IsOk() { return csResult, nil } ctx. - SetConditionTrue(status.ClusterReadyType). - EnsureStatusOption(status.AtlasDeploymentMongoDBVersionOption(c.MongoDBVersion)). - EnsureStatusOption(status.AtlasDeploymentConnectionStringsOption(c.ConnectionStrings)). - EnsureStatusOption(status.AtlasDeploymentMongoURIUpdatedOption(c.MongoURIUpdated)) + SetConditionTrue(status.DeploymentReadyType). + EnsureStatusOption(status.AtlasDeploymentMongoDBVersionOption(deployment.MongoDBVersion)). + EnsureStatusOption(status.AtlasDeploymentConnectionStringsOption(deployment.ConnectionStrings)). + EnsureStatusOption(status.AtlasDeploymentMongoURIUpdatedOption(deployment.MongoURIUpdated)) ctx.SetConditionTrue(status.ReadyType) return result, nil } -func (r *AtlasDeploymentReconciler) handleAdvancedOptions(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment) workflow.Result { - clusterName := cluster.GetClusterName() - atlasArgs, _, err := ctx.Client.Clusters.GetProcessArgs(context.Background(), project.Status.ID, clusterName) +func (r *AtlasDeploymentReconciler) handleAdvancedOptions(ctx *workflow.Context, project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment) workflow.Result { + deploymentName := deployment.GetDeploymentName() + atlasArgs, _, err := ctx.Client.Clusters.GetProcessArgs(context.Background(), project.Status.ID, deploymentName) if err != nil { return workflow.Terminate(workflow.Internal, "cannot get process args") } - if cluster.Spec.ProcessArgs == nil { + if deployment.Spec.ProcessArgs == nil { return workflow.OK() } - if !cluster.Spec.ProcessArgs.IsEqual(atlasArgs) { - options := mongodbatlas.ProcessArgs(*cluster.Spec.ProcessArgs) - args, resp, err := ctx.Client.Clusters.UpdateProcessArgs(context.Background(), project.Status.ID, clusterName, &options) + if !deployment.Spec.ProcessArgs.IsEqual(atlasArgs) { + options := mongodbatlas.ProcessArgs(*deployment.Spec.ProcessArgs) + args, resp, err := ctx.Client.Clusters.UpdateProcessArgs(context.Background(), project.Status.ID, deploymentName, &options) ctx.Log.Debugw("ProcessArgs Update", "args", args, "resp", resp.Body, "err", err) if err != nil { return workflow.Terminate(workflow.Internal, "cannot update process args") } - workflow.InProgress(workflow.ClusterAdvancedOptionsAreNotReady, "cluster Advanced Configuration Options are being updated") + 
workflow.InProgress(workflow.DeploymentAdvancedOptionsAreNotReady, "deployment Advanced Configuration Options are being updated") } return workflow.OK() @@ -425,7 +425,7 @@ func (r *AtlasDeploymentReconciler) Delete(e event.DeleteEvent) error { return nil } - log := r.Log.With("atlascluster", kube.ObjectKeyFromObject(cluster)) + log := r.Log.With("atlasdeployment", kube.ObjectKeyFromObject(cluster)) log.Infow("-> Starting AtlasDeployment deletion", "spec", cluster.Spec) @@ -434,16 +434,16 @@ func (r *AtlasDeploymentReconciler) Delete(e event.DeleteEvent) error { return errors.New("cannot read project resource") } - log = log.With("projectID", project.Status.ID, "clusterName", cluster.GetClusterName()) + log = log.With("projectID", project.Status.ID, "clusterName", cluster.GetDeploymentName()) if customresource.ResourceShouldBeLeftInAtlas(cluster) { - log.Infof("Not removing Atlas Cluster from Atlas as the '%s' annotation is set", customresource.ResourcePolicyAnnotation) - } else if err := r.deleteClusterFromAtlas(cluster, project, log); err != nil { + log.Infof("Not removing Atlas Deployment from Atlas as the '%s' annotation is set", customresource.ResourcePolicyAnnotation) + } else if err := r.deleteDeploymentFromAtlas(cluster, project, log); err != nil { log.Error("Failed to remove cluster from Atlas: %s", err) } // We always remove the connection secrets even if the cluster is not removed from Atlas - secrets, err := connectionsecret.ListByClusterName(r.Client, cluster.Namespace, project.ID(), cluster.GetClusterName()) + secrets, err := connectionsecret.ListByDeploymentName(r.Client, cluster.Namespace, project.ID(), cluster.GetDeploymentName()) if err != nil { return fmt.Errorf("failed to find connection secrets for the user: %w", err) } @@ -460,7 +460,7 @@ func (r *AtlasDeploymentReconciler) Delete(e event.DeleteEvent) error { return nil } -func (r *AtlasDeploymentReconciler) deleteClusterFromAtlas(cluster *mdbv1.AtlasDeployment, project *mdbv1.AtlasProject, log *zap.SugaredLogger) error { +func (r *AtlasDeploymentReconciler) deleteDeploymentFromAtlas(cluster *mdbv1.AtlasDeployment, project *mdbv1.AtlasProject, log *zap.SugaredLogger) error { connection, err := atlas.ReadConnection(log, r.Client, r.GlobalAPISecret, project.ConnectionSecretObjectKey()) if err != nil { return err @@ -475,24 +475,24 @@ func (r *AtlasDeploymentReconciler) deleteClusterFromAtlas(cluster *mdbv1.AtlasD timeout := time.Now().Add(workflow.DefaultTimeout) for time.Now().Before(timeout) { - deleteClusterFunc := atlasClient.Clusters.Delete + deleteDeploymentFunc := atlasClient.Clusters.Delete if cluster.Spec.AdvancedDeploymentSpec != nil { - deleteClusterFunc = atlasClient.AdvancedClusters.Delete + deleteDeploymentFunc = atlasClient.AdvancedClusters.Delete } if cluster.IsServerless() { - deleteClusterFunc = atlasClient.ServerlessInstances.Delete + deleteDeploymentFunc = atlasClient.ServerlessInstances.Delete } - _, err = deleteClusterFunc(context.Background(), project.Status.ID, cluster.GetClusterName()) + _, err = deleteDeploymentFunc(context.Background(), project.Status.ID, cluster.GetDeploymentName()) var apiError *mongodbatlas.ErrorResponse if errors.As(err, &apiError) && apiError.ErrorCode == atlas.ClusterNotFound { - log.Info("Cluster doesn't exist or is already deleted") + log.Info("Deployment doesn't exist or is already deleted") return } if err != nil { - log.Errorw("Cannot delete Atlas cluster", "error", err) + log.Errorw("Cannot delete Atlas deployment", "error", err) time.Sleep(workflow.DefaultRetry) 
continue } diff --git a/pkg/controller/atlasdeployment/deployment.go b/pkg/controller/atlasdeployment/deployment.go index 17a52a3073..e0bdad5e5d 100644 --- a/pkg/controller/atlasdeployment/deployment.go +++ b/pkg/controller/atlasdeployment/deployment.go @@ -21,103 +21,103 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/stringutil" ) -func ensureClusterState(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment) (atlasCluster *mongodbatlas.Cluster, _ workflow.Result) { - atlasCluster, resp, err := ctx.Client.Clusters.Get(context.Background(), project.Status.ID, cluster.Spec.DeploymentSpec.Name) +func ensureDeploymentState(ctx *workflow.Context, project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment) (atlasDeployment *mongodbatlas.Cluster, _ workflow.Result) { + atlasDeployment, resp, err := ctx.Client.Clusters.Get(context.Background(), project.Status.ID, deployment.Spec.DeploymentSpec.Name) if err != nil { if resp == nil { - return atlasCluster, workflow.Terminate(workflow.Internal, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.Internal, err.Error()) } if resp.StatusCode != http.StatusNotFound { - return atlasCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } - atlasCluster, err = cluster.Spec.Cluster() + atlasDeployment, err = deployment.Spec.Deployment() if err != nil { - return atlasCluster, workflow.Terminate(workflow.Internal, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.Internal, err.Error()) } - ctx.Log.Infof("Cluster %s doesn't exist in Atlas - creating", cluster.Spec.DeploymentSpec.Name) - atlasCluster, _, err = ctx.Client.Clusters.Create(context.Background(), project.Status.ID, atlasCluster) + ctx.Log.Infof("Deployment %s doesn't exist in Atlas - creating", deployment.Spec.DeploymentSpec.Name) + atlasDeployment, _, err = ctx.Client.Clusters.Create(context.Background(), project.Status.ID, atlasDeployment) if err != nil { - return atlasCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } } - switch atlasCluster.StateName { + switch atlasDeployment.StateName { case "IDLE": - return regularClusterIdle(ctx, project, cluster, atlasCluster) + return regularDeploymentIdle(ctx, project, deployment, atlasDeployment) case "CREATING": - return atlasCluster, workflow.InProgress(workflow.ClusterCreating, "cluster is provisioning") + return atlasDeployment, workflow.InProgress(workflow.DeploymentCreating, "deployment is provisioning") case "UPDATING", "REPAIRING": - return atlasCluster, workflow.InProgress(workflow.ClusterUpdating, "cluster is updating") + return atlasDeployment, workflow.InProgress(workflow.DeploymentUpdating, "deployment is updating") // TODO: add "DELETING", "DELETED", handle 404 on delete default: - return atlasCluster, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown cluster state %q", atlasCluster.StateName)) + return atlasDeployment, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown deployment state %q", atlasDeployment.StateName)) } } -func regularClusterIdle(ctx *workflow.Context, project *mdbv1.AtlasProject, cluster *mdbv1.AtlasDeployment, atlasCluster *mongodbatlas.Cluster) (*mongodbatlas.Cluster, workflow.Result) { - resultingCluster, err := MergedCluster(*atlasCluster, cluster.Spec) +func 
regularDeploymentIdle(ctx *workflow.Context, project *mdbv1.AtlasProject, deployment *mdbv1.AtlasDeployment, atlasDeployment *mongodbatlas.Cluster) (*mongodbatlas.Cluster, workflow.Result) { + resultingDeployment, err := MergedDeployment(*atlasDeployment, deployment.Spec) if err != nil { - return atlasCluster, workflow.Terminate(workflow.Internal, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.Internal, err.Error()) } - if done := ClustersEqual(ctx.Log, *atlasCluster, resultingCluster); done { - return atlasCluster, workflow.OK() + if done := DeploymentsEqual(ctx.Log, *atlasDeployment, resultingDeployment); done { + return atlasDeployment, workflow.OK() } - if cluster.Spec.DeploymentSpec.Paused != nil { - if atlasCluster.Paused == nil || *atlasCluster.Paused != *cluster.Spec.DeploymentSpec.Paused { + if deployment.Spec.DeploymentSpec.Paused != nil { + if atlasDeployment.Paused == nil || *atlasDeployment.Paused != *deployment.Spec.DeploymentSpec.Paused { // paused is different from Atlas // we need to first send a special (un)pause request before reconciling everything else - resultingCluster = mongodbatlas.Cluster{ - Paused: cluster.Spec.DeploymentSpec.Paused, + resultingDeployment = mongodbatlas.Cluster{ + Paused: deployment.Spec.DeploymentSpec.Paused, } } else { // otherwise, don't send the paused field - resultingCluster.Paused = nil + resultingDeployment.Paused = nil } } - resultingCluster = cleanupCluster(resultingCluster) + resultingDeployment = cleanupDeployment(resultingDeployment) - // Handle shared (M0,M2,M5) cluster to non-shared cluster upgrade - scheduled, err := handleSharedClusterUpgrade(ctx, atlasCluster, &resultingCluster) + // Handle shared (M0,M2,M5) deployment to non-shared deployment upgrade + scheduled, err := handleSharedDeploymentUpgrade(ctx, atlasDeployment, &resultingDeployment) if err != nil { - return atlasCluster, workflow.Terminate(workflow.Internal, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.Internal, err.Error()) } if scheduled { - return atlasCluster, workflow.InProgress(workflow.ClusterUpdating, "cluster is upgrading") + return atlasDeployment, workflow.InProgress(workflow.DeploymentUpdating, "deployment is upgrading") } - atlasCluster, _, err = ctx.Client.Clusters.Update(context.Background(), project.Status.ID, cluster.Spec.DeploymentSpec.Name, &resultingCluster) + atlasDeployment, _, err = ctx.Client.Clusters.Update(context.Background(), project.Status.ID, deployment.Spec.DeploymentSpec.Name, &resultingDeployment) if err != nil { - return atlasCluster, workflow.Terminate(workflow.ClusterNotUpdatedInAtlas, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.DeploymentNotUpdatedInAtlas, err.Error()) } - return atlasCluster, workflow.InProgress(workflow.ClusterUpdating, "cluster is updating") + return atlasDeployment, workflow.InProgress(workflow.DeploymentUpdating, "deployment is updating") } -// cleanupCluster will unset some fields that cannot be changed via API or are deprecated. -func cleanupCluster(cluster mongodbatlas.Cluster) mongodbatlas.Cluster { - cluster.ID = "" - cluster.MongoDBVersion = "" - cluster.MongoURI = "" - cluster.MongoURIUpdated = "" - cluster.MongoURIWithOptions = "" - cluster.SrvAddress = "" - cluster.StateName = "" - cluster.ReplicationFactor = nil - cluster.ReplicationSpec = nil - cluster.ConnectionStrings = nil - cluster = removeOutdatedFields(&cluster, nil) - - return cluster +// cleanupDeployment will unset some fields that cannot be changed via API or are deprecated. 
+func cleanupDeployment(deployment mongodbatlas.Cluster) mongodbatlas.Cluster { + deployment.ID = "" + deployment.MongoDBVersion = "" + deployment.MongoURI = "" + deployment.MongoURIUpdated = "" + deployment.MongoURIWithOptions = "" + deployment.SrvAddress = "" + deployment.StateName = "" + deployment.ReplicationFactor = nil + deployment.ReplicationSpec = nil + deployment.ConnectionStrings = nil + deployment = removeOutdatedFields(&deployment, nil) + + return deployment } // removeOutdatedFields unsets fields which should be empty based on flags @@ -142,9 +142,9 @@ func removeOutdatedFields(removeFrom *mongodbatlas.Cluster, lookAt *mongodbatlas return result } -// MergedCluster will return the result of merging AtlasDeploymentSpec with Atlas Cluster -func MergedCluster(atlasCluster mongodbatlas.Cluster, spec mdbv1.AtlasDeploymentSpec) (result mongodbatlas.Cluster, err error) { - if err = compat.JSONCopy(&result, atlasCluster); err != nil { +// MergedDeployment will return the result of merging AtlasDeploymentSpec with Atlas Deployment +func MergedDeployment(atlasDeployment mongodbatlas.Cluster, spec mdbv1.AtlasDeploymentSpec) (result mongodbatlas.Cluster, err error) { + if err = compat.JSONCopy(&result, atlasDeployment); err != nil { return } @@ -154,21 +154,21 @@ func MergedCluster(atlasCluster mongodbatlas.Cluster, spec mdbv1.AtlasDeployment mergeRegionConfigs(result.ReplicationSpecs, spec.DeploymentSpec.ReplicationSpecs) - // According to the docs for 'providerSettings.regionName' (https://docs.atlas.mongodb.com/reference/api/clusters-create-one/): - // "Don't specify this parameter when creating a multi-region cluster using the replicationSpec object or a Global - // Cluster with the replicationSpecs array." + // According to the docs for 'providerSettings.regionName' (https://docs.atlas.mongodb.com/reference/api/clusters-create-one/): + // "Don't specify this parameter when creating a multi-region deployment using the replicationSpec object or a Global + // Deployment with the replicationSpecs array." // The problem is that Atlas API accepts the create/update request but then returns the 'ProviderSettings.RegionName' empty in GET request // So we need to consider this while comparing (to avoid perpetual updates) - if len(result.ReplicationSpecs) > 0 && atlasCluster.ProviderSettings.RegionName == "" { + if len(result.ReplicationSpecs) > 0 && atlasDeployment.ProviderSettings.RegionName == "" { result.ProviderSettings.RegionName = "" } return } -// mergeRegionConfigs removes replicationSpecs[i].RegionsConfigs[key] from Atlas Cluster that are absent in Operator. // Dev idea: this could have been added into some more generic method like `JSONCopy` or something wrapping it to make -// sure any Atlas map get redundant keys removed. So far there's only one map in Cluster ('RegionsConfig') so we'll do this +// mergeRegionConfigs removes replicationSpecs[i].RegionsConfigs[key] from Atlas Deployment that are absent in Operator. // Dev idea: this could have been added into some more generic method like `JSONCopy` or something wrapping it to make +// sure any Atlas map get redundant keys removed. So far there's only one map in Deployment ('RegionsConfig') so we'll do this // explicitly - but may make sense to refactor this later if more maps are added (and all follow the same logic). 
func mergeRegionConfigs(atlasSpecs []mongodbatlas.ReplicationSpec, operatorSpecs []mdbv1.ReplicationSpec) { for i, operatorSpec := range operatorSpecs { @@ -186,20 +186,20 @@ func mergeRegionConfigs(atlasSpecs []mongodbatlas.ReplicationSpec, operatorSpecs } } -// ClustersEqual compares two Atlas Clusters -func ClustersEqual(log *zap.SugaredLogger, clusterAtlas mongodbatlas.Cluster, clusterOperator mongodbatlas.Cluster) bool { - clusterAtlas = removeOutdatedFields(&clusterAtlas, &clusterOperator) - clusterOperator = removeOutdatedFields(&clusterOperator, nil) +// DeploymentsEqual compares two Atlas Deployments +func DeploymentsEqual(log *zap.SugaredLogger, deploymentAtlas mongodbatlas.Cluster, deploymentOperator mongodbatlas.Cluster) bool { + deploymentAtlas = removeOutdatedFields(&deploymentAtlas, &deploymentOperator) + deploymentOperator = removeOutdatedFields(&deploymentOperator, nil) - d := cmp.Diff(clusterAtlas, clusterOperator, cmpopts.EquateEmpty()) + d := cmp.Diff(deploymentAtlas, deploymentOperator, cmpopts.EquateEmpty()) if d != "" { - log.Debugf("Clusters are different: %s", d) + log.Debugf("Deployments are different: %s", d) } return d == "" } -func (r *AtlasDeploymentReconciler) ensureConnectionSecrets(ctx *workflow.Context, project *mdbv1.AtlasProject, name string, connectionStrings *mongodbatlas.ConnectionStrings, clusterResource *mdbv1.AtlasDeployment) workflow.Result { +func (r *AtlasDeploymentReconciler) ensureConnectionSecrets(ctx *workflow.Context, project *mdbv1.AtlasProject, name string, connectionStrings *mongodbatlas.ConnectionStrings, deploymentResource *mdbv1.AtlasDeployment) workflow.Result { databaseUsers := mdbv1.AtlasDatabaseUserList{} err := r.Client.List(context.TODO(), &databaseUsers, client.InNamespace(project.Namespace)) if err != nil { @@ -221,14 +221,14 @@ func (r *AtlasDeploymentReconciler) ensureConnectionSecrets(ctx *workflow.Contex continue } - scopes := dbUser.GetScopes(mdbv1.ClusterScopeType) + scopes := dbUser.GetScopes(mdbv1.DeploymentScopeType) if len(scopes) != 0 && !stringutil.Contains(scopes, name) { continue } password, err := dbUser.ReadPassword(r.Client) if err != nil { - return workflow.Terminate(workflow.ClusterConnectionSecretsNotCreated, err.Error()) + return workflow.Terminate(workflow.DeploymentConnectionSecretsNotCreated, err.Error()) } data := connectionsecret.ConnectionData{ @@ -240,33 +240,33 @@ func (r *AtlasDeploymentReconciler) ensureConnectionSecrets(ctx *workflow.Contex secretName, err := connectionsecret.Ensure(r.Client, project.Namespace, project.Spec.Name, project.ID(), name, data) if err != nil { - return workflow.Terminate(workflow.ClusterConnectionSecretsNotCreated, err.Error()) + return workflow.Terminate(workflow.DeploymentConnectionSecretsNotCreated, err.Error()) } secrets = append(secrets, secretName) } if len(secrets) > 0 { - r.EventRecorder.Eventf(clusterResource, "Normal", "ConnectionSecretsEnsured", "Connection Secrets were created/updated: %s", strings.Join(secrets, ", ")) + r.EventRecorder.Eventf(deploymentResource, "Normal", "ConnectionSecretsEnsured", "Connection Secrets were created/updated: %s", strings.Join(secrets, ", ")) } return workflow.OK() } -func handleSharedClusterUpgrade(ctx *workflow.Context, current *mongodbatlas.Cluster, new *mongodbatlas.Cluster) (scheduled bool, _ error) { - baseErr := "can not perform cluster upgrade. 
ERR: %v" - if !clusterShouldBeUpgraded(current, new) { - ctx.Log.Debug("cluster shouldn't be upgraded") +func handleSharedDeploymentUpgrade(ctx *workflow.Context, current *mongodbatlas.Cluster, new *mongodbatlas.Cluster) (scheduled bool, _ error) { + baseErr := "can not perform deployment upgrade. ERR: %v" + if !deploymentShouldBeUpgraded(current, new) { + ctx.Log.Debug("deployment shouldn't be upgraded") return false, nil } // Remove backingProviderName new.ProviderSettings.BackingProviderName = "" - ctx.Log.Infof("performing cluster upgrade from %s, to %s", + ctx.Log.Infof("performing deployment upgrade from %s, to %s", current.ProviderSettings.InstanceSizeName, new.ProviderSettings.InstanceSizeName) // TODO: Replace with the go-atlas-client when this method will be added to go-atlas-client atlasClient := ctx.Client urlStr := fmt.Sprintf("/api/atlas/v1.0/groups/%s/clusters/tenantUpgrade", current.GroupID) req, err := atlasClient.NewRequest(context.Background(), http.MethodPost, urlStr, new) if err != nil { return false, fmt.Errorf(baseErr, err) @@ -280,14 +280,14 @@ func handleSharedClusterUpgrade(ctx *workflow.Context, current *mongodbatlas.Clu return true, nil } -func clusterShouldBeUpgraded(current *mongodbatlas.Cluster, new *mongodbatlas.Cluster) bool { - if isSharedCluster(current.ProviderSettings.InstanceSizeName) && !isSharedCluster(new.ProviderSettings.InstanceSizeName) { +func deploymentShouldBeUpgraded(current *mongodbatlas.Cluster, new *mongodbatlas.Cluster) bool { + if isSharedDeployment(current.ProviderSettings.InstanceSizeName) && !isSharedDeployment(new.ProviderSettings.InstanceSizeName) { return true } return false } -func isSharedCluster(instanceSizeName string) bool { +func isSharedDeployment(instanceSizeName string) bool { switch strings.ToUpper(instanceSizeName) { case "M0", "M2", "M5": return true diff --git a/pkg/controller/atlasdeployment/deployment_test.go b/pkg/controller/atlasdeployment/deployment_test.go index 3ab03bbf6b..185c14dd32 100644 --- a/pkg/controller/atlasdeployment/deployment_test.go +++ b/pkg/controller/atlasdeployment/deployment_test.go @@ -17,15 +17,15 @@ func init() { zap.ReplaceGlobals(logger) } -func TestClusterMatchesSpec(t *testing.T) { - t.Run("Clusters match (enums)", func(t *testing.T) { - atlasCluster := mongodbatlas.Cluster{ +func TestDeploymentMatchesSpec(t *testing.T) { + t.Run("Deployments match (enums)", func(t *testing.T) { + atlasDeployment := mongodbatlas.Cluster{ ProviderSettings: &mongodbatlas.ProviderSettings{ ProviderName: "AWS", }, ClusterType: "GEOSHARDED", } - operatorCluster := mdbv1.AtlasDeploymentSpec{ + operatorDeployment := mdbv1.AtlasDeploymentSpec{ DeploymentSpec: &mdbv1.DeploymentSpec{ ProviderSettings: &mdbv1.ProviderSettingsSpec{ ProviderName: provider.ProviderAWS, @@ -34,56 +34,56 @@ func TestClusterMatchesSpec(t *testing.T) { }, } - merged, err := MergedCluster(atlasCluster, operatorCluster) + merged, err := MergedDeployment(atlasDeployment, operatorDeployment) assert.NoError(t, err) - equal := ClustersEqual(zap.S(), atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), atlasDeployment, merged) assert.True(t, equal) }) - t.Run("Clusters don't match (enums)", func(t *testing.T) { - atlasClusterEnum := mongodbatlas.Cluster{ClusterType: "GEOSHARDED"} - operatorClusterEnum := mdbv1.AtlasDeploymentSpec{DeploymentSpec: &mdbv1.DeploymentSpec{ClusterType: mdbv1.TypeReplicaSet}} + t.Run("Deployments don't match 
(enums)", func(t *testing.T) { + atlasDeploymentEnum := mongodbatlas.Cluster{ClusterType: "GEOSHARDED"} + operatorDeploymentEnum := mdbv1.AtlasDeploymentSpec{DeploymentSpec: &mdbv1.DeploymentSpec{ClusterType: mdbv1.TypeReplicaSet}} - merged, err := MergedCluster(atlasClusterEnum, operatorClusterEnum) + merged, err := MergedDeployment(atlasDeploymentEnum, operatorDeploymentEnum) assert.NoError(t, err) - equal := ClustersEqual(zap.S(), atlasClusterEnum, merged) + equal := DeploymentsEqual(zap.S(), atlasDeploymentEnum, merged) assert.False(t, equal) }) - t.Run("Clusters match (ProviderSettings.RegionName ignored)", func(t *testing.T) { - common := mdbv1.DefaultAWSCluster("test-ns", "project-name") + t.Run("Deployments match (ProviderSettings.RegionName ignored)", func(t *testing.T) { + common := mdbv1.DefaultAWSDeployment("test-ns", "project-name") // Note, that in reality it seems that Atlas nullifies ProviderSettings.RegionName only if RegionsConfig are specified // but it's ok not to overcomplicate common.Spec.DeploymentSpec.ReplicationSpecs = append(common.Spec.DeploymentSpec.ReplicationSpecs, mdbv1.ReplicationSpec{ NumShards: int64ptr(2), }) // Emulating Atlas behavior when it nullifies the ProviderSettings.RegionName - atlasCluster, err := common.DeepCopy().WithRegionName("").Spec.Cluster() + atlasDeployment, err := common.DeepCopy().WithRegionName("").Spec.Deployment() assert.NoError(t, err) - operatorCluster := common.DeepCopy() + operatorDeployment := common.DeepCopy() - merged, err := MergedCluster(*atlasCluster, operatorCluster.Spec) + merged, err := MergedDeployment(*atlasDeployment, operatorDeployment.Spec) assert.NoError(t, err) - equal := ClustersEqual(zap.S(), *atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), *atlasDeployment, merged) assert.True(t, equal) }) - t.Run("Clusters don't match (ProviderSettings.RegionName was changed)", func(t *testing.T) { - atlasCluster, err := mdbv1.DefaultAWSCluster("test-ns", "project-name").WithRegionName("US_WEST_1").Spec.Cluster() + t.Run("Deployments don't match (ProviderSettings.RegionName was changed)", func(t *testing.T) { + atlasDeployment, err := mdbv1.DefaultAWSDeployment("test-ns", "project-name").WithRegionName("US_WEST_1").Spec.Deployment() assert.NoError(t, err) // RegionName has changed and no ReplicationSpecs are specified (meaning ProviderSettings.RegionName is mandatory) - operatorCluster := mdbv1.DefaultAWSCluster("test-ns", "project-name").WithRegionName("EU_EAST_1") + operatorDeployment := mdbv1.DefaultAWSDeployment("test-ns", "project-name").WithRegionName("EU_EAST_1") - merged, err := MergedCluster(*atlasCluster, operatorCluster.Spec) + merged, err := MergedDeployment(*atlasDeployment, operatorDeployment.Spec) assert.NoError(t, err) - equal := ClustersEqual(zap.S(), *atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), *atlasDeployment, merged) assert.False(t, equal) }) - t.Run("Clusters match when Atlas adds default ReplicationSpecs", func(t *testing.T) { - atlasCluster, err := mdbv1.DefaultAWSCluster("test-ns", "project-name").Spec.Cluster() + t.Run("Deployments match when Atlas adds default ReplicationSpecs", func(t *testing.T) { + atlasDeployment, err := mdbv1.DefaultAWSDeployment("test-ns", "project-name").Spec.Deployment() assert.NoError(t, err) - atlasCluster.ReplicationSpecs = []mongodbatlas.ReplicationSpec{ + atlasDeployment.ReplicationSpecs = []mongodbatlas.ReplicationSpec{ { ID: "id", NumShards: int64ptr(1), @@ -93,22 +93,22 @@ func TestClusterMatchesSpec(t *testing.T) { }, }, } - 
operatorCluster := mdbv1.DefaultAWSCluster("test-ns", "project-name") - operatorCluster.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ + operatorDeployment := mdbv1.DefaultAWSDeployment("test-ns", "project-name") + operatorDeployment.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ NumShards: int64ptr(1), ZoneName: "zone1", }} - merged, err := MergedCluster(*atlasCluster, operatorCluster.Spec) + merged, err := MergedDeployment(*atlasDeployment, operatorDeployment.Spec) assert.NoError(t, err) - equal := ClustersEqual(zap.S(), *atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), *atlasDeployment, merged) assert.True(t, equal) }) - t.Run("Clusters don't match when Atlas adds default ReplicationSpecs and Operator overrides something", func(t *testing.T) { - atlasCluster, err := mdbv1.DefaultAWSCluster("test-ns", "project-name").Spec.Cluster() + t.Run("Deployments don't match when Atlas adds default ReplicationSpecs and Operator overrides something", func(t *testing.T) { + atlasDeployment, err := mdbv1.DefaultAWSDeployment("test-ns", "project-name").Spec.Deployment() assert.NoError(t, err) - atlasCluster.ReplicationSpecs = []mongodbatlas.ReplicationSpec{ + atlasDeployment.ReplicationSpecs = []mongodbatlas.ReplicationSpec{ { ID: "id", NumShards: int64ptr(1), @@ -118,13 +118,13 @@ func TestClusterMatchesSpec(t *testing.T) { }, }, } - operatorCluster := mdbv1.DefaultAWSCluster("test-ns", "project-name") - operatorCluster.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ + operatorDeployment := mdbv1.DefaultAWSDeployment("test-ns", "project-name") + operatorDeployment.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ NumShards: int64ptr(2), ZoneName: "zone5", }} - merged, err := MergedCluster(*atlasCluster, operatorCluster.Spec) + merged, err := MergedDeployment(*atlasDeployment, operatorDeployment.Spec) assert.NoError(t, err) expectedReplicationSpecs := []mongodbatlas.ReplicationSpec{ @@ -139,14 +139,14 @@ func TestClusterMatchesSpec(t *testing.T) { } assert.Equal(t, expectedReplicationSpecs, merged.ReplicationSpecs) - equal := ClustersEqual(zap.S(), *atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), *atlasDeployment, merged) assert.False(t, equal) }) - t.Run("Clusters don't match - Operator removed the region", func(t *testing.T) { - atlasCluster, err := mdbv1.DefaultAWSCluster("test-ns", "project-name").Spec.Cluster() + t.Run("Deployments don't match - Operator removed the region", func(t *testing.T) { + atlasDeployment, err := mdbv1.DefaultAWSDeployment("test-ns", "project-name").Spec.Deployment() assert.NoError(t, err) - atlasCluster.ReplicationSpecs = []mongodbatlas.ReplicationSpec{{ + atlasDeployment.ReplicationSpecs = []mongodbatlas.ReplicationSpec{{ ID: "id", NumShards: int64ptr(1), ZoneName: "zone1", @@ -155,8 +155,8 @@ func TestClusterMatchesSpec(t *testing.T) { "US_WEST": {AnalyticsNodes: int64ptr(2), ElectableNodes: int64ptr(5), Priority: int64ptr(6), ReadOnlyNodes: int64ptr(0)}, }}, } - operatorCluster := mdbv1.DefaultAWSCluster("test-ns", "project-name") - operatorCluster.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ + operatorDeployment := mdbv1.DefaultAWSDeployment("test-ns", "project-name") + operatorDeployment.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ NumShards: int64ptr(1), ZoneName: "zone1", RegionsConfig: map[string]mdbv1.RegionsConfig{ @@ -164,7 +164,7 @@ func TestClusterMatchesSpec(t *testing.T) { }}, } - merged, err := MergedCluster(*atlasCluster, 
operatorCluster.Spec) + merged, err := MergedDeployment(*atlasDeployment, operatorDeployment.Spec) assert.NoError(t, err) expectedReplicationSpecs := []mongodbatlas.ReplicationSpec{{ @@ -176,7 +176,7 @@ func TestClusterMatchesSpec(t *testing.T) { } assert.Equal(t, expectedReplicationSpecs, merged.ReplicationSpecs) - equal := ClustersEqual(zap.S(), *atlasCluster, merged) + equal := DeploymentsEqual(zap.S(), *atlasDeployment, merged) assert.False(t, equal) }) } diff --git a/pkg/controller/atlasdeployment/serverless_deployment.go b/pkg/controller/atlasdeployment/serverless_deployment.go index 3ce931dde9..6a331eb471 100644 --- a/pkg/controller/atlasdeployment/serverless_deployment.go +++ b/pkg/controller/atlasdeployment/serverless_deployment.go @@ -11,19 +11,19 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/pkg/controller/workflow" ) -func ensureServerlessInstanceState(ctx *workflow.Context, project *mdbv1.AtlasProject, serverlessSpec *mdbv1.ServerlessSpec) (atlasCluster *mongodbatlas.Cluster, _ workflow.Result) { - atlasCluster, resp, err := ctx.Client.ServerlessInstances.Get(context.Background(), project.Status.ID, serverlessSpec.Name) +func ensureServerlessInstanceState(ctx *workflow.Context, project *mdbv1.AtlasProject, serverlessSpec *mdbv1.ServerlessSpec) (atlasDeployment *mongodbatlas.Cluster, _ workflow.Result) { + atlasDeployment, resp, err := ctx.Client.ServerlessInstances.Get(context.Background(), project.Status.ID, serverlessSpec.Name) if err != nil { if resp == nil { - return atlasCluster, workflow.Terminate(workflow.Internal, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.Internal, err.Error()) } if resp.StatusCode != http.StatusNotFound { - return atlasCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } ctx.Log.Infof("Serverless Instance %s doesn't exist in Atlas - creating", serverlessSpec.Name) - atlasCluster, _, err = ctx.Client.ServerlessInstances.Create(context.Background(), project.Status.ID, &mongodbatlas.ServerlessCreateRequestParams{ + atlasDeployment, _, err = ctx.Client.ServerlessInstances.Create(context.Background(), project.Status.ID, &mongodbatlas.ServerlessCreateRequestParams{ Name: serverlessSpec.Name, ProviderSettings: &mongodbatlas.ServerlessProviderSettings{ BackingProviderName: serverlessSpec.ProviderSettings.BackingProviderName, @@ -32,22 +32,22 @@ func ensureServerlessInstanceState(ctx *workflow.Context, project *mdbv1.AtlasPr }, }) if err != nil { - return atlasCluster, workflow.Terminate(workflow.ClusterNotCreatedInAtlas, err.Error()) + return atlasDeployment, workflow.Terminate(workflow.DeploymentNotCreatedInAtlas, err.Error()) } } - switch atlasCluster.StateName { + switch atlasDeployment.StateName { case "IDLE": - return atlasCluster, workflow.OK() + return atlasDeployment, workflow.OK() case "CREATING": - return atlasCluster, workflow.InProgress(workflow.ClusterCreating, "cluster is provisioning") + return atlasDeployment, workflow.InProgress(workflow.DeploymentCreating, "deployment is provisioning") case "UPDATING", "REPAIRING": - return atlasCluster, workflow.InProgress(workflow.ClusterUpdating, "cluster is updating") + return atlasDeployment, workflow.InProgress(workflow.DeploymentUpdating, "deployment is updating") // TODO: add "DELETING", "DELETED", handle 404 on delete default: - return atlasCluster, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown cluster state %q", 
atlasCluster.StateName)) + return atlasDeployment, workflow.Terminate(workflow.Internal, fmt.Sprintf("unknown deployment state %q", atlasDeployment.StateName)) } } diff --git a/pkg/controller/atlasproject/atlasproject_controller.go b/pkg/controller/atlasproject/atlasproject_controller.go index 90ceecfd90..1e2f104c9a 100644 --- a/pkg/controller/atlasproject/atlasproject_controller.go +++ b/pkg/controller/atlasproject/atlasproject_controller.go @@ -110,7 +110,7 @@ func (r *AtlasProjectReconciler) Reconcile(context context.Context, req ctrl.Req if err != nil { if errRm := r.removeDeletionFinalizer(context, project); errRm != nil { result = workflow.Terminate(workflow.Internal, errRm.Error()) - setCondition(ctx, status.ClusterReadyType, result) + setCondition(ctx, status.DeploymentReadyType, result) } result = workflow.Terminate(workflow.AtlasCredentialsNotProvided, err.Error()) setCondition(ctx, status.ProjectReadyType, result) @@ -121,7 +121,7 @@ func (r *AtlasProjectReconciler) Reconcile(context context.Context, req ctrl.Req atlasClient, err := atlas.Client(r.AtlasDomain, connection, log) if err != nil { result := workflow.Terminate(workflow.Internal, err.Error()) - setCondition(ctx, status.ClusterReadyType, result) + setCondition(ctx, status.DeploymentReadyType, result) return result.ReconcileResult(), nil } ctx.Client = atlasClient @@ -146,7 +146,7 @@ func (r *AtlasProjectReconciler) Reconcile(context context.Context, req ctrl.Req log.Debugw("Add deletion finalizer", "name", getFinalizerName()) if err := r.addDeletionFinalizer(context, project); err != nil { result = workflow.Terminate(workflow.Internal, err.Error()) - setCondition(ctx, status.ClusterReadyType, result) + setCondition(ctx, status.DeploymentReadyType, result) return result.ReconcileResult(), nil } } @@ -164,14 +164,14 @@ func (r *AtlasProjectReconciler) Reconcile(context context.Context, req ctrl.Req if err = r.deleteAtlasProject(context, atlasClient, project); err != nil { result = workflow.Terminate(workflow.Internal, err.Error()) - setCondition(ctx, status.ClusterReadyType, result) + setCondition(ctx, status.DeploymentReadyType, result) return result.ReconcileResult(), nil } } if err = r.removeDeletionFinalizer(context, project); err != nil { result = workflow.Terminate(workflow.Internal, err.Error()) - setCondition(ctx, status.ClusterReadyType, result) + setCondition(ctx, status.DeploymentReadyType, result) return result.ReconcileResult(), nil } } diff --git a/pkg/controller/connectionsecret/listsecrets.go b/pkg/controller/connectionsecret/listsecrets.go index 3ce5cabb34..e966ce5459 100644 --- a/pkg/controller/connectionsecret/listsecrets.go +++ b/pkg/controller/connectionsecret/listsecrets.go @@ -11,8 +11,8 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/pkg/util/kube" ) -// ListByClusterName returns all secrets in the specified namespace that have labels for 'projectID' and 'clusterName' -func ListByClusterName(k8sClient client.Client, namespace, projectID, clusterName string) ([]corev1.Secret, error) { +// ListByDeploymentName returns all secrets in the specified namespace that have labels for 'projectID' and 'clusterName' +func ListByDeploymentName(k8sClient client.Client, namespace, projectID, clusterName string) ([]corev1.Secret, error) { return list(k8sClient, namespace, projectID, clusterName, "") } diff --git a/pkg/controller/connectionsecret/listsecrets_test.go b/pkg/controller/connectionsecret/listsecrets_test.go index 497d07a7b9..50614c0819 100644 --- 
a/pkg/controller/connectionsecret/listsecrets_test.go +++ b/pkg/controller/connectionsecret/listsecrets_test.go @@ -50,19 +50,19 @@ func TestListConnectionSecrets(t *testing.T) { _, err = Ensure(fakeClient, "otherNs", "p1", "603e7bf38a94956835659ae5", "c1", data) assert.NoError(t, err) - secrets, err := ListByClusterName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c1") + secrets, err := ListByDeploymentName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c1") assert.NoError(t, err) assert.Equal(t, []string{"p1-c1-user1", "p1-c1-user2"}, getSecretsNames(secrets)) - secrets, err = ListByClusterName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c2") + secrets, err = ListByDeploymentName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c2") assert.NoError(t, err) assert.Equal(t, []string{"p1-c2-user1"}, getSecretsNames(secrets)) - secrets, err = ListByClusterName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c3") + secrets, err = ListByDeploymentName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "c3") assert.NoError(t, err) assert.Len(t, getSecretsNames(secrets), 0) - secrets, err = ListByClusterName(fakeClient, "testNs", "non-existent-project-id", "c1") + secrets, err = ListByDeploymentName(fakeClient, "testNs", "non-existent-project-id", "c1") assert.NoError(t, err) assert.Len(t, getSecretsNames(secrets), 0) @@ -86,7 +86,7 @@ func TestListConnectionSecrets(t *testing.T) { _, err := Ensure(fakeClient, "testNs", "#nice project!", "603e7bf38a94956835659ae5", "the cluster@thecompany.com/", data) assert.NoError(t, err) - secrets, err := ListByClusterName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "the cluster@thecompany.com/") + secrets, err := ListByDeploymentName(fakeClient, "testNs", "603e7bf38a94956835659ae5", "the cluster@thecompany.com/") assert.NoError(t, err) assert.Equal(t, []string{"nice-project-the-cluster-thecompany.com-user1"}, getSecretsNames(secrets)) }) diff --git a/pkg/controller/validate/validate.go b/pkg/controller/validate/validate.go index df5d29dc6f..e5ab4bf72d 100644 --- a/pkg/controller/validate/validate.go +++ b/pkg/controller/validate/validate.go @@ -9,22 +9,22 @@ import ( mdbv1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1" ) -func ClusterSpec(clusterSpec mdbv1.AtlasDeploymentSpec) error { +func DeploymentSpec(deploymentSpec mdbv1.AtlasDeploymentSpec) error { var err error - if allAreNil(clusterSpec.AdvancedDeploymentSpec, clusterSpec.ServerlessSpec, clusterSpec.DeploymentSpec) { - err = multierror.Append(err, errors.New("expected exactly one of spec.clusterSpec or spec.advancedClusterSpec or spec.serverlessSpec to be present, but none were")) + if allAreNil(deploymentSpec.AdvancedDeploymentSpec, deploymentSpec.ServerlessSpec, deploymentSpec.DeploymentSpec) { + err = multierror.Append(err, errors.New("expected exactly one of spec.deploymentSpec or spec.advancedDeploymentSpec or spec.serverlessSpec to be present, but none were")) } - if moreThanOneIsNonNil(clusterSpec.AdvancedDeploymentSpec, clusterSpec.ServerlessSpec, clusterSpec.DeploymentSpec) { - err = multierror.Append(err, errors.New("expected exactly one of spec.clusterSpec, spec.advancedClusterSpec or spec.serverlessSpec, more than one were present")) + if moreThanOneIsNonNil(deploymentSpec.AdvancedDeploymentSpec, deploymentSpec.ServerlessSpec, deploymentSpec.DeploymentSpec) { + err = multierror.Append(err, errors.New("expected exactly one of spec.deploymentSpec, spec.advancedDeploymentSpec or spec.serverlessSpec, more than one were present")) } - if
clusterSpec.DeploymentSpec != nil { - if clusterSpec.DeploymentSpec.ProviderSettings != nil && (clusterSpec.DeploymentSpec.ProviderSettings.InstanceSizeName == "" && clusterSpec.DeploymentSpec.ProviderSettings.ProviderName != "SERVERLESS") { + if deploymentSpec.DeploymentSpec != nil { + if deploymentSpec.DeploymentSpec.ProviderSettings != nil && (deploymentSpec.DeploymentSpec.ProviderSettings.InstanceSizeName == "" && deploymentSpec.DeploymentSpec.ProviderSettings.ProviderName != "SERVERLESS") { err = multierror.Append(err, errors.New("must specify instanceSizeName if provider name is not SERVERLESS")) } - if clusterSpec.DeploymentSpec.ProviderSettings != nil && (clusterSpec.DeploymentSpec.ProviderSettings.InstanceSizeName != "" && clusterSpec.DeploymentSpec.ProviderSettings.ProviderName == "SERVERLESS") { + if deploymentSpec.DeploymentSpec.ProviderSettings != nil && (deploymentSpec.DeploymentSpec.ProviderSettings.InstanceSizeName != "" && deploymentSpec.DeploymentSpec.ProviderSettings.ProviderName == "SERVERLESS") { err = multierror.Append(err, errors.New("must not specify instanceSizeName if provider name is SERVERLESS")) } } diff --git a/pkg/controller/validate/validate_test.go b/pkg/controller/validate/validate_test.go index 8e64f85e5b..5d35a44102 100644 --- a/pkg/controller/validate/validate_test.go +++ b/pkg/controller/validate/validate_test.go @@ -12,11 +12,11 @@ func TestClusterValidation(t *testing.T) { t.Run("Invalid cluster specs", func(t *testing.T) { t.Run("Multiple specs specified", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: &mdbv1.AdvancedDeploymentSpec{}, DeploymentSpec: &mdbv1.DeploymentSpec{}} - assert.Error(t, ClusterSpec(spec)) + assert.Error(t, DeploymentSpec(spec)) }) t.Run("No specs specified", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: nil, DeploymentSpec: nil} - assert.Error(t, ClusterSpec(spec)) + assert.Error(t, DeploymentSpec(spec)) }) t.Run("Instance size not empty when serverless", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: nil, DeploymentSpec: &mdbv1.DeploymentSpec{ @@ -25,7 +25,7 @@ func TestClusterValidation(t *testing.T) { ProviderName: "SERVERLESS", }, }} - assert.Error(t, ClusterSpec(spec)) + assert.Error(t, DeploymentSpec(spec)) }) t.Run("Instance size unset when not serverless", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: nil, DeploymentSpec: &mdbv1.DeploymentSpec{ @@ -34,19 +34,19 @@ func TestClusterValidation(t *testing.T) { ProviderName: "AWS", }, }} - assert.Error(t, ClusterSpec(spec)) + assert.Error(t, DeploymentSpec(spec)) }) }) t.Run("Valid cluster specs", func(t *testing.T) { t.Run("Advanced cluster spec specified", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: &mdbv1.AdvancedDeploymentSpec{}, DeploymentSpec: nil} - assert.NoError(t, ClusterSpec(spec)) - assert.Nil(t, ClusterSpec(spec)) + assert.NoError(t, DeploymentSpec(spec)) + assert.Nil(t, DeploymentSpec(spec)) }) t.Run("Regular cluster specs specified", func(t *testing.T) { spec := mdbv1.AtlasDeploymentSpec{AdvancedDeploymentSpec: nil, DeploymentSpec: &mdbv1.DeploymentSpec{}} - assert.NoError(t, ClusterSpec(spec)) - assert.Nil(t, ClusterSpec(spec)) + assert.NoError(t, DeploymentSpec(spec)) + assert.Nil(t, DeploymentSpec(spec)) }) t.Run("Serverless Cluster", func(t *testing.T) { @@ -55,8 +55,8 @@ func TestClusterValidation(t *testing.T) { ProviderName: "SERVERLESS", }, }} - assert.NoError(t, ClusterSpec(spec)) - 
assert.Nil(t, ClusterSpec(spec)) + assert.NoError(t, DeploymentSpec(spec)) + assert.Nil(t, DeploymentSpec(spec)) }) }) } diff --git a/pkg/controller/workflow/reason.go b/pkg/controller/workflow/reason.go index 3472731d0e..b960fd42f7 100644 --- a/pkg/controller/workflow/reason.go +++ b/pkg/controller/workflow/reason.go @@ -25,12 +25,12 @@ const ( // Atlas Cluster reasons const ( - ClusterNotCreatedInAtlas ConditionReason = "ClusterNotCreatedInAtlas" - ClusterNotUpdatedInAtlas ConditionReason = "ClusterNotUpdatedInAtlas" - ClusterCreating ConditionReason = "ClusterCreating" - ClusterUpdating ConditionReason = "ClusterUpdating" - ClusterConnectionSecretsNotCreated ConditionReason = "ClusterConnectionSecretsNotCreated" - ClusterAdvancedOptionsAreNotReady ConditionReason = "ClusterAdvancedOptionsAreNotReady" + DeploymentNotCreatedInAtlas ConditionReason = "DeploymentNotCreatedInAtlas" + DeploymentNotUpdatedInAtlas ConditionReason = "DeploymentNotUpdatedInAtlas" + DeploymentCreating ConditionReason = "DeploymentCreating" + DeploymentUpdating ConditionReason = "DeploymentUpdating" + DeploymentConnectionSecretsNotCreated ConditionReason = "DeploymentConnectionSecretsNotCreated" + DeploymentAdvancedOptionsAreNotReady ConditionReason = "DeploymentAdvancedOptionsAreNotReady" ) // Atlas Database User reasons @@ -39,7 +39,7 @@ const ( DatabaseUserNotUpdatedInAtlas ConditionReason = "DatabaseUserNotUpdatedInAtlas" DatabaseUserConnectionSecretsNotCreated ConditionReason = "DatabaseUserConnectionSecretsNotCreated" DatabaseUserStaleConnectionSecrets ConditionReason = "DatabaseUserStaleConnectionSecrets" - DatabaseUserClustersAppliedChanges ConditionReason = "ClustersAppliedDatabaseUsersChanges" + DatabaseUserDeploymentAppliedChanges ConditionReason = "DeploymentAppliedDatabaseUsersChanges" DatabaseUserInvalidSpec ConditionReason = "DatabaseUserInvalidSpec" DatabaseUserExpired ConditionReason = "DatabaseUserExpired" ) diff --git a/pkg/util/testutil/atlas.go b/pkg/util/testutil/atlas.go index a5707acdf8..fe47dfdd3f 100644 --- a/pkg/util/testutil/atlas.go +++ b/pkg/util/testutil/atlas.go @@ -7,22 +7,22 @@ import ( "go.mongodb.org/atlas/mongodbatlas" ) -// WaitForAtlasDeploymentStateToNotBeReached periodically checks the given atlas cluster for a given condition. The function +// WaitForAtlasDeploymentStateToNotBeReached periodically checks the given atlas deployment for a given condition. The function // returns true after the given context timeout is exceeded. 
-func WaitForAtlasDeploymentStateToNotBeReached(ctx context.Context, atlasClient *mongodbatlas.Client, projectName, clusterName string, fns ...func(*mongodbatlas.Cluster) bool) func() bool { +func WaitForAtlasDeploymentStateToNotBeReached(ctx context.Context, atlasClient *mongodbatlas.Client, projectName, deploymentName string, fns ...func(*mongodbatlas.Cluster) bool) func() bool { return func() bool { select { case <-ctx.Done(): return true default: - atlasCluster, _, err := atlasClient.Clusters.Get(context.Background(), projectName, clusterName) + atlasDeployment, _, err := atlasClient.Clusters.Get(context.Background(), projectName, deploymentName) if err != nil { return false } allTrue := true for _, fn := range fns { - if !fn(atlasCluster) { + if !fn(atlasDeployment) { allTrue = false } } diff --git a/test/e2e/actions/actions.go b/test/e2e/actions/actions.go index 2059fa282e..345680202f 100644 --- a/test/e2e/actions/actions.go +++ b/test/e2e/actions/actions.go @@ -17,43 +17,43 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" ) -func UpdateCluster(newData *model.TestDataProvider) { +func UpdateDeployment(newData *model.TestDataProvider) { var generation int - By("Update cluster\n", func() { + By("Update Deployment\n", func() { utils.SaveToFile( - newData.Resources.Clusters[0].ClusterFileName(newData.Resources), - utils.JSONToYAMLConvert(newData.Resources.Clusters[0]), + newData.Resources.Deployments[0].DeploymentFileName(newData.Resources), + utils.JSONToYAMLConvert(newData.Resources.Deployments[0]), ) - generation, _ = strconv.Atoi(kubecli.GetGeneration(newData.Resources.Namespace, newData.Resources.Clusters[0].GetClusterNameResource())) - kubecli.Apply(newData.Resources.Clusters[0].ClusterFileName(newData.Resources), "-n", newData.Resources.Namespace) + generation, _ = strconv.Atoi(kubecli.GetGeneration(newData.Resources.Namespace, newData.Resources.Deployments[0].GetDeploymentNameResource())) + kubecli.Apply(newData.Resources.Deployments[0].DeploymentFileName(newData.Resources), "-n", newData.Resources.Namespace) generation++ }) - By("Wait cluster updating\n", func() { - WaitCluster(newData.Resources, strconv.Itoa(generation)) + By("Wait Deployment updating\n", func() { + WaitDeployment(newData.Resources, strconv.Itoa(generation)) }) By("Check attributes\n", func() { - uCluster := mongocli.GetClustersInfo(newData.Resources.ProjectID, newData.Resources.Clusters[0].Spec.GetClusterName()) - CompareClustersSpec(newData.Resources.Clusters[0].Spec, uCluster) + uDeployment := mongocli.GetDeploymentsInfo(newData.Resources.ProjectID, newData.Resources.Deployments[0].Spec.GetDeploymentName()) + CompareDeploymentsSpec(newData.Resources.Deployments[0].Spec, uDeployment) }) } -func UpdateClusterFromUpdateConfig(data *model.TestDataProvider) { - By("Load new cluster config", func() { - data.Resources.Clusters = []model.AC{} // TODO for range +func UpdateDeploymentFromUpdateConfig(data *model.TestDataProvider) { + By("Load new Deployment config", func() { + data.Resources.Deployments = []model.AtlasDeployment{} // TODO for range GinkgoWriter.Write([]byte(data.ConfUpdatePaths[0])) - data.Resources.Clusters = append(data.Resources.Clusters, model.LoadUserClusterConfig(data.ConfUpdatePaths[0])) - data.Resources.Clusters[0].Spec.Project.Name = data.Resources.Project.GetK8sMetaName() + data.Resources.Deployments = append(data.Resources.Deployments, model.LoadUserDeploymentConfig(data.ConfUpdatePaths[0])) + data.Resources.Deployments[0].Spec.Project.Name = 
data.Resources.Project.GetK8sMetaName() utils.SaveToFile( - data.Resources.Clusters[0].ClusterFileName(data.Resources), - utils.JSONToYAMLConvert(data.Resources.Clusters[0]), + data.Resources.Deployments[0].DeploymentFileName(data.Resources), + utils.JSONToYAMLConvert(data.Resources.Deployments[0]), ) }) - UpdateCluster(data) + UpdateDeployment(data) - By("Check user data still in the cluster\n", func() { + By("Check user data still in the Deployment\n", func() { for i := range data.Resources.Users { // TODO in parallel(?) port := strconv.Itoa(i + data.PortGroup) key := port @@ -64,22 +64,22 @@ func UpdateClusterFromUpdateConfig(data *model.TestDataProvider) { }) } -func activateCluster(data *model.TestDataProvider, paused bool) { - data.Resources.Clusters[0].Spec.DeploymentSpec.Paused = &paused - UpdateCluster(data) - By("Check additional cluster field `paused`\n") - uCluster := mongocli.GetClustersInfo(data.Resources.ProjectID, data.Resources.Clusters[0].Spec.GetClusterName()) - Expect(uCluster.Paused).Should(Equal(data.Resources.Clusters[0].Spec.DeploymentSpec.Paused)) +func activateDeployment(data *model.TestDataProvider, paused bool) { + data.Resources.Deployments[0].Spec.DeploymentSpec.Paused = &paused + UpdateDeployment(data) + By("Check additional Deployment field `paused`\n") + uDeployment := mongocli.GetDeploymentsInfo(data.Resources.ProjectID, data.Resources.Deployments[0].Spec.GetDeploymentName()) + Expect(uDeployment.Paused).Should(Equal(data.Resources.Deployments[0].Spec.DeploymentSpec.Paused)) } -func SuspendCluster(data *model.TestDataProvider) { +func SuspendDeployment(data *model.TestDataProvider) { paused := true - activateCluster(data, paused) + activateDeployment(data, paused) } -func ReactivateCluster(data *model.TestDataProvider) { +func ReactivateDeployment(data *model.TestDataProvider) { paused := false - activateCluster(data, paused) + activateDeployment(data, paused) } func DeleteFirstUser(data *model.TestDataProvider) { diff --git a/test/e2e/actions/helm_related.go b/test/e2e/actions/helm_related.go index 07551dd8e6..3e5f24abe6 100644 --- a/test/e2e/actions/helm_related.go +++ b/test/e2e/actions/helm_related.go @@ -13,24 +13,24 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/model" ) -// helm update should change at least 1 field: databaseusers, project, cluster +// helm update should change at least 1 field: databaseusers, project, deployment func HelmDefaultUpgradeResouces(data *model.TestDataProvider) { By("User use HELM upgrade command for changing atlas resources\n", func() { data.Resources.Project.Spec.ProjectIPAccessList[0].Comment = "updated" enabled := true - data.Resources.Clusters[0].Spec.DeploymentSpec.ProviderBackupEnabled = &enabled + data.Resources.Deployments[0].Spec.DeploymentSpec.ProviderBackupEnabled = &enabled data.Resources.Users[0].DeleteAllRoles() data.Resources.Users[0].AddBuildInAdminRole() data.Resources.Users[0].Spec.Project.Name = data.Resources.GetAtlasProjectFullKubeName() - generation, _ := strconv.Atoi(kubecli.GetGeneration(data.Resources.Namespace, data.Resources.Clusters[0].GetClusterNameResource())) + generation, _ := strconv.Atoi(kubecli.GetGeneration(data.Resources.Namespace, data.Resources.Deployments[0].GetDeploymentNameResource())) helm.UpgradeAtlasDeploymentChartDev(data.Resources) By("Wait project creation", func() { - WaitCluster(data.Resources, strconv.Itoa(generation+1)) + WaitDeployment(data.Resources, strconv.Itoa(generation+1)) ExpectWithOffset(1, data.Resources.ProjectID).ShouldNot(BeEmpty()) }) 
- updatedCluster := mongocli.GetClustersInfo(data.Resources.ProjectID, data.Resources.Clusters[0].Spec.GetClusterName()) - CompareClustersSpec(data.Resources.Clusters[0].Spec, updatedCluster) + updatedDeployment := mongocli.GetDeploymentsInfo(data.Resources.ProjectID, data.Resources.Deployments[0].Spec.GetDeploymentName()) + CompareDeploymentsSpec(data.Resources.Deployments[0].Spec, updatedDeployment) user := mongocli.GetUser("admin", data.Resources.Users[0].Spec.Username, data.Resources.ProjectID) ExpectWithOffset(1, user.Roles[0].RoleName).Should(Equal(model.RoleBuildInAdmin)) }) @@ -66,15 +66,15 @@ func HelmUpgradeDeleteFirstUser(data *model.TestDataProvider) { // HelmUpgradeChartVersions upgrade chart version of crd, operator, and func HelmUpgradeChartVersions(data *model.TestDataProvider) { By("User update helm chart (used main-branch)", func() { - generation, _ := strconv.Atoi(kubecli.GetGeneration(data.Resources.Namespace, data.Resources.Clusters[0].GetClusterNameResource())) + generation, _ := strconv.Atoi(kubecli.GetGeneration(data.Resources.Namespace, data.Resources.Deployments[0].GetDeploymentNameResource())) helm.UpgradeOperatorChart(data.Resources) helm.UpgradeAtlasDeploymentChartDev(data.Resources) By("Wait updating") - WaitCluster(data.Resources, strconv.Itoa(generation+1)) + WaitDeployment(data.Resources, strconv.Itoa(generation+1)) - updatedCluster := mongocli.GetClustersInfo(data.Resources.ProjectID, data.Resources.Clusters[0].Spec.GetClusterName()) - CompareClustersSpec(data.Resources.Clusters[0].Spec, updatedCluster) + updatedDeployment := mongocli.GetDeploymentsInfo(data.Resources.ProjectID, data.Resources.Deployments[0].Spec.GetDeploymentName()) + CompareDeploymentsSpec(data.Resources.Deployments[0].Spec, updatedDeployment) CheckUsersAttributes(data.Resources) }) } diff --git a/test/e2e/actions/steps.go b/test/e2e/actions/steps.go index 8e40907e54..437975e75d 100644 --- a/test/e2e/actions/steps.go +++ b/test/e2e/actions/steps.go @@ -23,43 +23,43 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" ) -func WaitCluster(input model.UserInputs, generation string) { +func WaitDeployment(input model.UserInputs, generation string) { EventuallyWithOffset(1, func() string { - return kubecli.GetGeneration(input.Namespace, input.Clusters[0].GetClusterNameResource()) + return kubecli.GetGeneration(input.Namespace, input.Deployments[0].GetDeploymentNameResource()) }, "5m", "10s", ).Should(Equal(generation)) EventuallyWithOffset(1, func() string { - return kubecli.GetStatusCondition("Ready", input.Namespace, input.Clusters[0].GetClusterNameResource()) + return kubecli.GetStatusCondition("Ready", input.Namespace, input.Deployments[0].GetDeploymentNameResource()) }, "60m", "1m", - ).Should(Equal("True"), "Kubernetes resource: Cluster status `Ready` should be 'True'") + ).Should(Equal("True"), "Kubernetes resource: Deployment status `Ready` should be 'True'") - ExpectWithOffset(1, kubecli.GetK8sClusterStateName( - input.Namespace, input.Clusters[0].GetClusterNameResource()), - ).Should(Equal("IDLE"), "Kubernetes resource: Cluster status should be IDLE") + ExpectWithOffset(1, kubecli.GetK8sDeploymentStateName( + input.Namespace, input.Deployments[0].GetDeploymentNameResource()), + ).Should(Equal("IDLE"), "Kubernetes resource: Deployment status should be IDLE") - cluster := input.Clusters[0] + deployment := input.Deployments[0] switch { - case cluster.Spec.AdvancedDeploymentSpec != nil: + case deployment.Spec.AdvancedDeploymentSpec != nil: atlasClient, err := 
atlas.AClient() Expect(err).To(BeNil()) - advancedCluster, err := atlasClient.GetAdvancedDeployment(input.ProjectID, cluster.Spec.AdvancedDeploymentSpec.Name) + advancedDeployment, err := atlasClient.GetAdvancedDeployment(input.ProjectID, deployment.Spec.AdvancedDeploymentSpec.Name) Expect(err).To(BeNil()) - Expect(advancedCluster.StateName).To(Equal("IDLE")) - case cluster.Spec.ServerlessSpec != nil: + Expect(advancedDeployment.StateName).To(Equal("IDLE")) + case deployment.Spec.ServerlessSpec != nil: atlasClient, err := atlas.AClient() Expect(err).To(BeNil()) - serverlessInstance, err := atlasClient.GetServerlessInstance(input.ProjectID, cluster.Spec.ServerlessSpec.Name) + serverlessInstance, err := atlasClient.GetServerlessInstance(input.ProjectID, deployment.Spec.ServerlessSpec.Name) Expect(err).To(BeNil()) Expect(serverlessInstance.StateName).To(Equal("IDLE")) default: ExpectWithOffset( - 1, mongocli.GetClusterStateName(input.ProjectID, input.Clusters[0].Spec.GetClusterName()), - ).Should(Equal("IDLE"), "Atlas: Cluster status should be IDLE") + 1, mongocli.GetDeploymentStateName(input.ProjectID, input.Deployments[0].Spec.GetDeploymentName()), + ).Should(Equal("IDLE"), "Atlas: Deployment status should be IDLE") } } @@ -86,9 +86,9 @@ func WaitTestApplication(ns, label string) { EventuallyWithOffset(1, isAppRunning(), "2m", "10s").Should(BeTrue(), "Test application should be running") } -func CheckIfClusterExist(input model.UserInputs) func() bool { +func CheckIfDeploymentExist(input model.UserInputs) func() bool { return func() bool { - return mongocli.IsClusterExist(input.ProjectID, input.Clusters[0].Spec.DeploymentSpec.Name) + return mongocli.IsDeploymentExist(input.ProjectID, input.Deployments[0].Spec.DeploymentSpec.Name) } } @@ -115,7 +115,7 @@ func CheckIfUserExist(username, projecID string) func() bool { } } -func CompareClustersSpec(requested model.ClusterSpec, created mongodbatlas.Cluster) { +func CompareDeploymentsSpec(requested model.DeploymentSpec, created mongodbatlas.Cluster) { ExpectWithOffset(1, created).To(MatchFields(IgnoreExtras, Fields{ "MongoURI": Not(BeEmpty()), "MongoURIWithOptions": Not(BeEmpty()), @@ -128,7 +128,7 @@ func CompareClustersSpec(requested model.ClusterSpec, created mongodbatlas.Clust "Standard": Not(BeEmpty()), "StandardSrv": Not(BeEmpty()), })), - }), "Cluster should be the same as requested by the user") + }), "Deployment should be the same as requested by the user") if len(requested.DeploymentSpec.ReplicationSpecs) > 0 { for i, replica := range requested.DeploymentSpec.ReplicationSpecs { @@ -143,16 +143,16 @@ func CompareClustersSpec(requested model.ClusterSpec, created mongodbatlas.Clust } else { ExpectWithOffset(1, requested.DeploymentSpec.ProviderSettings).To(PointTo(MatchFields(IgnoreExtras, Fields{ "RegionName": Equal(created.ProviderSettings.RegionName), - })), "Cluster should be the same as requested by the user: Region Name") + })), "Deployment should be the same as requested by the user: Region Name") } if requested.DeploymentSpec.ProviderSettings.ProviderName == "TENANT" { ExpectWithOffset(1, requested.DeploymentSpec.ProviderSettings).To(PointTo(MatchFields(IgnoreExtras, Fields{ "BackingProviderName": Equal(created.ProviderSettings.BackingProviderName), - })), "Cluster should be the same as requested by the user: Backking Provider Name") + })), "Deployment should be the same as requested by the user: Backing Provider Name") } } -func CompareAdvancedDeploymentsSpec(requested model.ClusterSpec, created mongodbatlas.AdvancedCluster) {
+func CompareAdvancedDeploymentsSpec(requested model.DeploymentSpec, created mongodbatlas.AdvancedCluster) { advancedSpec := requested.AdvancedDeploymentSpec Expect(created.MongoDBVersion).ToNot(BeEmpty()) Expect(created.MongoDBVersion).ToNot(BeEmpty()) @@ -174,7 +174,7 @@ func CompareAdvancedDeploymentsSpec(requested model.ClusterSpec, created mongodb } } -func CompareServerlessSpec(requested model.ClusterSpec, created mongodbatlas.Cluster) { +func CompareServerlessSpec(requested model.DeploymentSpec, created mongodbatlas.Cluster) { serverlessSpec := requested.ServerlessSpec Expect(created.MongoDBVersion).ToNot(BeEmpty()) Expect(created.ConnectionStrings.StandardSrv).ToNot(BeEmpty()) @@ -223,19 +223,19 @@ func SaveDefaultOperatorLogs(input model.UserInputs) { ) } -func SaveClusterDump(input model.UserInputs) { - kubecli.GetClusterDump(fmt.Sprintf("output/%s/dump", input.Namespace)) +func SaveDeploymentDump(input model.UserInputs) { + kubecli.GetDeploymentDump(fmt.Sprintf("output/%s/dump", input.Namespace)) } func CheckUsersAttributes(input model.UserInputs) { - userDBResourceName := func(clusterName string, user model.DBUser) string { // user name helmkind or kube-test-kind + userDBResourceName := func(deploymentName string, user model.DBUser) string { // user name helmkind or kube-test-kind if input.KeyName[0:4] == "helm" { - return fmt.Sprintf("atlasdatabaseusers.atlas.mongodb.com/%s-%s", clusterName, user.Spec.Username) + return fmt.Sprintf("atlasdatabaseusers.atlas.mongodb.com/%s-%s", deploymentName, user.Spec.Username) } return fmt.Sprintf("atlasdatabaseusers.atlas.mongodb.com/%s", user.ObjectMeta.Name) } - for _, cluster := range input.Clusters { + for _, deployment := range input.Deployments { for _, user := range input.Users { var atlasUser *mongodbatlas.DatabaseUser @@ -247,7 +247,7 @@ func CheckUsersAttributes(input model.UserInputs) { EventuallyWithOffset(1, getUser, "7m", "10s").Should(BeTrue()) EventuallyWithOffset(1, func() string { - return kubecli.GetStatusCondition("Ready", input.Namespace, userDBResourceName(cluster.ObjectMeta.Name, user)) + return kubecli.GetStatusCondition("Ready", input.Namespace, userDBResourceName(deployment.ObjectMeta.Name, user)) }, "7m", "1m", ).Should(Equal("True"), "Kubernetes resource: User resources status `Ready` should be True") @@ -317,12 +317,12 @@ func PrepareUsersConfigurations(data *model.TestDataProvider) { GinkgoWriter.Write([]byte(data.Resources.ProjectPath + "\n")) utils.SaveToFile(data.Resources.ProjectPath, data.Resources.Project.ConvertByte()) }) - if len(data.Resources.Clusters) > 0 { - By("Create cluster spec", func() { - data.Resources.Clusters[0].Spec.Project.Name = data.Resources.Project.GetK8sMetaName() + if len(data.Resources.Deployments) > 0 { + By("Create deployment spec", func() { + data.Resources.Deployments[0].Spec.Project.Name = data.Resources.Project.GetK8sMetaName() utils.SaveToFile( - data.Resources.Clusters[0].ClusterFileName(data.Resources), - utils.JSONToYAMLConvert(data.Resources.Clusters[0]), + data.Resources.Deployments[0].DeploymentFileName(data.Resources), + utils.JSONToYAMLConvert(data.Resources.Deployments[0]), ) }) } @@ -404,16 +404,16 @@ func DeployProjectAndWait(data *model.TestDataProvider, generation string) { }) } -func DeployCluster(data *model.TestDataProvider, generation string) { - By("Create cluster", func() { - kubecli.Apply(data.Resources.Clusters[0].ClusterFileName(data.Resources), "-n", data.Resources.Namespace) +func DeployDeployment(data *model.TestDataProvider, generation string) { + 
By("Create deployment", func() { + kubecli.Apply(data.Resources.Deployments[0].DeploymentFileName(data.Resources), "-n", data.Resources.Namespace) }) - By("Wait cluster creation", func() { - WaitCluster(data.Resources, "1") + By("Wait deployment creation", func() { + WaitDeployment(data.Resources, "1") }) - By("check cluster Attribute", func() { - cluster := mongocli.GetClustersInfo(data.Resources.ProjectID, data.Resources.Clusters[0].Spec.DeploymentSpec.Name) - CompareClustersSpec(data.Resources.Clusters[0].Spec, cluster) + By("check deployment Attribute", func() { + deployment := mongocli.GetDeploymentsInfo(data.Resources.ProjectID, data.Resources.Deployments[0].Spec.DeploymentSpec.Name) + CompareDeploymentsSpec(data.Resources.Deployments[0].Spec, deployment) }) } @@ -433,7 +433,7 @@ func DeployUsers(data *model.TestDataProvider) { // DeployUserResourcesAction deploy all user resources, wait, and check results func DeployUserResourcesAction(data *model.TestDataProvider) { DeployProjectAndWait(data, "1") - DeployCluster(data, "1") + DeployDeployment(data, "1") DeployUsers(data) } @@ -446,17 +446,17 @@ func DeleteDBUsersApps(data model.TestDataProvider) { } func DeleteUserResources(data *model.TestDataProvider) { - DeleteUserResourcesCluster(data) + DeleteUserResourcesDeployment(data) DeleteUserResourcesProject(data) } -func DeleteUserResourcesCluster(data *model.TestDataProvider) { - By("Delete cluster", func() { - kubecli.Delete(data.Resources.Clusters[0].ClusterFileName(data.Resources), "-n", data.Resources.Namespace) +func DeleteUserResourcesDeployment(data *model.TestDataProvider) { + By("Delete deployment", func() { + kubecli.Delete(data.Resources.Deployments[0].DeploymentFileName(data.Resources), "-n", data.Resources.Namespace) Eventually( - CheckIfClusterExist(data.Resources), + CheckIfDeploymentExist(data.Resources), "10m", "1m", - ).Should(BeFalse(), "Cluster should be deleted from Atlas") + ).Should(BeFalse(), "Deployment should be deleted from Atlas") }) } diff --git a/test/e2e/api/atlas/atlas.go b/test/e2e/api/atlas/atlas.go index 1664ea40df..98f4b50059 100644 --- a/test/e2e/api/atlas/atlas.go +++ b/test/e2e/api/atlas/atlas.go @@ -84,21 +84,21 @@ func (a *Atlas) GetPrivateEndpoint(projectID, provider string) ([]mongodbatlas.P return enpointsList, nil } -func (a *Atlas) GetAdvancedDeployment(projectId, clusterName string) (*mongodbatlas.AdvancedCluster, error) { - advancedCluster, _, err := a.Client.AdvancedClusters.Get(context.Background(), projectId, clusterName) +func (a *Atlas) GetAdvancedDeployment(projectId, deploymentName string) (*mongodbatlas.AdvancedCluster, error) { + advancedDeployment, _, err := a.Client.AdvancedClusters.Get(context.Background(), projectId, deploymentName) if err != nil { return nil, err } - ginkgoPrettyPrintf(advancedCluster, "getting advanced cluster %s in project %s", clusterName, projectId) - return advancedCluster, nil + ginkgoPrettyPrintf(advancedDeployment, "getting advanced deployment %s in project %s", deploymentName, projectId) + return advancedDeployment, nil } -func (a *Atlas) GetServerlessInstance(projectId, clusterName string) (*mongodbatlas.Cluster, error) { - serverlessInstance, _, err := a.Client.ServerlessInstances.Get(context.Background(), projectId, clusterName) +func (a *Atlas) GetServerlessInstance(projectId, deploymentName string) (*mongodbatlas.Cluster, error) { + serverlessInstance, _, err := a.Client.ServerlessInstances.Get(context.Background(), projectId, deploymentName) if err != nil { return nil, err } - 
ginkgoPrettyPrintf(serverlessInstance, "getting serverless instance %s in project %s", clusterName, projectId) + ginkgoPrettyPrintf(serverlessInstance, "getting serverless instance %s in project %s", deploymentName, projectId) return serverlessInstance, nil } diff --git a/test/e2e/bundle_test.go b/test/e2e/bundle_test.go index a48b995d23..d3f7061a3f 100644 --- a/test/e2e/bundle_test.go +++ b/test/e2e/bundle_test.go @@ -25,25 +25,25 @@ var _ = Describe("User can deploy operator from bundles", func() { Eventually(kubecli.GetVersionOutput()).Should(Say(K8sVersion)) }) _ = AfterEach(func() { - By("Atfer each.", func() { + By("After each.", func() { if CurrentSpecReport().Failed() { actions.SaveK8sResources( - []string{"atlasclusters", "atlasdatabaseusers", "atlasprojects"}, + []string{"atlasdeployments", "atlasdatabaseusers", "atlasprojects"}, data.Resources.Namespace, ) - actions.SaveClusterDump(data.Resources) + actions.SaveDeploymentDump(data.Resources) actions.AfterEachFinalCleanup([]model.TestDataProvider{data}) } }) }) It("User can install operator with OLM", Label("bundle-test"), func() { - By("User creates configuration for a new Project and Cluster", func() { + By("User creates configuration for a new Project and Deployment", func() { data = model.NewTestDataProvider( "bundle-wide", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader"). @@ -66,7 +66,7 @@ var _ = Describe("User can deploy operator from bundles", func() { actions.DeployUserResourcesAction(&data) }) - By("Delete user resources(project/cluster)", func() { + By("Delete user resources(project/deployment)", func() { actions.DeleteUserResources(&data) }) }) diff --git a/test/e2e/cli/helm/helm.go b/test/e2e/cli/helm/helm.go index 58932fc57f..8d2a8c5ac8 100644 --- a/test/e2e/cli/helm/helm.go +++ b/test/e2e/cli/helm/helm.go @@ -72,7 +72,7 @@ func InstallTestApplication(input model.UserInputs, user model.DBUser, port stri Install( "test-app-"+user.Spec.Username, config.TestAppHelmChartPath, - "--set-string", fmt.Sprintf("connectionSecret=%s-%s-%s", input.Project.GetProjectName(), input.Clusters[0].Spec.GetClusterName(), user.Spec.Username), + "--set-string", fmt.Sprintf("connectionSecret=%s-%s-%s", input.Project.GetProjectName(), input.Deployments[0].Spec.GetDeploymentName(), user.Spec.Username), "--set-string", fmt.Sprintf("nodePort=%s", port), "-n", input.Namespace, ) @@ -82,7 +82,7 @@ func RestartTestApplication(input model.UserInputs, user model.DBUser, port stri Upgrade( "test-app-"+user.Spec.Username, config.TestAppHelmChartPath, - "--set-string", fmt.Sprintf("connectionSecret=%s-%s-%s", input.Project.GetProjectName(), input.Clusters[0].Spec.GetClusterName(), user.Spec.Username), + "--set-string", fmt.Sprintf("connectionSecret=%s-%s-%s", input.Project.GetProjectName(), input.Deployments[0].Spec.GetDeploymentName(), user.Spec.Username), "--set-string", fmt.Sprintf("nodePort=%s", port), "-n", input.Namespace, "--recreate-pods", @@ -167,15 +167,15 @@ func AddMongoDBRepo() { cli.SessionShouldExit(session) } -// InstallClusterSubmodule install the Atlas Cluster Helm Chart from submodule. -func InstallClusterSubmodule(input model.UserInputs) { +// InstallDeploymentSubmodule install the Atlas Deployment Helm Chart from submodule. 
+func InstallDeploymentSubmodule(input model.UserInputs) { PrepareHelmChartValuesFile(input) args := prepareHelmChartArgs(input, config.AtlasDeploymentHelmChartPath) Install(args...) } -// InstallClusterRelease from repo -func InstallClusterRelease(input model.UserInputs) { +// InstallDeploymentRelease from repo +func InstallDeploymentRelease(input model.UserInputs) { PrepareHelmChartValuesFile(input) args := prepareHelmChartArgs(input, "mongodb/atlas-deployment") Install(args...) @@ -207,7 +207,7 @@ func packageChart(sPath, dPath string) { func prepareHelmChartArgs(input model.UserInputs, chartName string) []string { args := []string{ - input.Clusters[0].Spec.GetClusterName(), + input.Deployments[0].Spec.GetDeploymentName(), chartName, "--set-string", fmt.Sprintf("atlas.secret.orgId=%s", os.Getenv("MCLI_ORG_ID")), "--set-string", fmt.Sprintf("atlas.secret.publicApiKey=%s", os.Getenv("MCLI_PUBLIC_API_KEY")), @@ -216,7 +216,7 @@ func prepareHelmChartArgs(input model.UserInputs, chartName string) []string { "--set-string", fmt.Sprintf("project.fullnameOverride=%s", input.Project.GetK8sMetaName()), "--set-string", fmt.Sprintf("project.atlasProjectName=%s", input.Project.GetProjectName()), - "--set-string", fmt.Sprintf("fullnameOverride=%s", input.Clusters[0].ObjectMeta.Name), + "--set-string", fmt.Sprintf("fullnameOverride=%s", input.Deployments[0].ObjectMeta.Name), "-f", pathToAtlasDeploymentValuesFile(input), "--namespace=" + input.Namespace, diff --git a/test/e2e/cli/helm/prepare_value_file.go b/test/e2e/cli/helm/prepare_value_file.go index 795fc621dc..b780a9f7ad 100644 --- a/test/e2e/cli/helm/prepare_value_file.go +++ b/test/e2e/cli/helm/prepare_value_file.go @@ -7,16 +7,16 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" ) -// Prepare chart values file for project, clusters, users https://github.com/mongodb/helm-charts/blob/main/charts/atlas-cluster/values.yaml +// Prepare chart values file for project, deployments, users https://github.com/mongodb/helm-charts/blob/main/charts/atlas-deployment/values.yaml func PrepareHelmChartValuesFile(input model.UserInputs) { type usersType struct { model.UserSpec Password string `json:"password,omitempty"` } type values struct { - Project model.ProjectSpec `json:"project,omitempty"` - Clusters []model.ClusterSpec `json:"deployments,omitempty"` - Users []usersType `json:"users,omitempty"` + Project model.ProjectSpec `json:"project,omitempty"` + Deployments []model.DeploymentSpec `json:"deployments,omitempty"` + Users []usersType `json:"users,omitempty"` } convertType := func(user model.DBUser) usersType { var newUser usersType @@ -29,9 +29,9 @@ func PrepareHelmChartValuesFile(input model.UserInputs) { newUser.DeleteAfterDate = user.Spec.DeleteAfterDate return newUser } - newValues := values{input.Project.Spec, []model.ClusterSpec{}, []usersType{}} - for i := range input.Clusters { - newValues.Clusters = append(newValues.Clusters, input.Clusters[i].Spec) + newValues := values{input.Project.Spec, []model.DeploymentSpec{}, []usersType{}} + for i := range input.Deployments { + newValues.Deployments = append(newValues.Deployments, input.Deployments[i].Spec) } for i := range input.Users { secret, _ := password.Generate(10, 3, 0, false, false) diff --git a/test/e2e/cli/kubecli/kubecli.go b/test/e2e/cli/kubecli/kubecli.go index 1a31285294..3ad5c4c4d9 100644 --- a/test/e2e/cli/kubecli/kubecli.go +++ b/test/e2e/cli/kubecli/kubecli.go @@ -69,17 +69,17 @@ func GetProjectResource(namespace, rName string) []byte { return 
session.Wait("1m").Out.Contents() } -// GetClusterResource -func GetClusterResource(namespace, rName string) v1.AtlasDeployment { +// GetDeploymentResource +func GetDeploymentResource(namespace, rName string) v1.AtlasDeployment { session := cli.Execute("kubectl", "get", rName, "-n", namespace, "-o", "json") output := session.Wait("1m").Out.Contents() - var cluster v1.AtlasDeployment - ExpectWithOffset(1, json.Unmarshal(output, &cluster)).ShouldNot(HaveOccurred()) - return cluster + var deployment v1.AtlasDeployment + ExpectWithOffset(1, json.Unmarshal(output, &deployment)).ShouldNot(HaveOccurred()) + return deployment } -func GetK8sClusterStateName(ns, rName string) string { - return GetClusterResource(ns, rName).Status.StateName +func GetK8sDeploymentStateName(ns, rName string) string { + return GetDeploymentResource(ns, rName).Status.StateName } func DeleteNamespace(ns string) *Buffer { @@ -285,7 +285,7 @@ func GetPrivateEndpoint(resource, ns string) []byte { // TODO do we need []byte? return session.Out.Contents() } -func GetClusterDump(output string) { +func GetDeploymentDump(output string) { outputFolder := fmt.Sprintf("--output-directory=%s", output) session := cli.Execute("kubectl", "cluster-info", "dump", "--all-namespaces", outputFolder) EventuallyWithOffset(1, session).Should(gexec.Exit(0)) diff --git a/test/e2e/cli/mongocli/mongocli.go b/test/e2e/cli/mongocli/mongocli.go index 47c9ff578b..6259134c37 100644 --- a/test/e2e/cli/mongocli/mongocli.go +++ b/test/e2e/cli/mongocli/mongocli.go @@ -14,22 +14,22 @@ import ( cli "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/cli" ) -func GetClusters(projectID string) []mongodbatlas.Cluster { +func GetDeployments(projectID string) []mongodbatlas.Cluster { session := cli.Execute("mongocli", "atlas", "clusters", "list", "--projectId", projectID, "-o", "json") output := session.Wait("1m").Out.Contents() - var clusters []mongodbatlas.Cluster - ExpectWithOffset(1, json.Unmarshal(output, &clusters)).ShouldNot(HaveOccurred()) - return clusters + var deployments []mongodbatlas.Cluster + ExpectWithOffset(1, json.Unmarshal(output, &deployments)).ShouldNot(HaveOccurred()) + return deployments } -func GetClusterByName(projectID string, name string) mongodbatlas.Cluster { - clusters := GetClusters(projectID) - for _, c := range clusters { +func GetDeploymentByName(projectID string, name string) mongodbatlas.Cluster { + deployments := GetDeployments(projectID) + for _, c := range deployments { if c.Name == name { return c } } - panic(fmt.Sprintf("no Cluster with name %s in project %s", name, projectID)) + panic(fmt.Sprintf("no deployment with name %s in project %s", name, projectID)) } func GetProjects() mongodbatlas.Projects { @@ -52,13 +52,13 @@ func GetProjectID(name string) string { return "" } -func GetClustersInfo(projectID string, name string) mongodbatlas.Cluster { +func GetDeploymentsInfo(projectID string, name string) mongodbatlas.Cluster { session := cli.Execute("mongocli", "atlas", "clusters", "describe", name, "--projectId", projectID, "-o", "json") EventuallyWithOffset(1, session).Should(gexec.Exit(0)) output := session.Out.Contents() - var cluster mongodbatlas.Cluster - ExpectWithOffset(1, json.Unmarshal(output, &cluster)).ShouldNot(HaveOccurred()) - return cluster + var deployment mongodbatlas.Cluster + ExpectWithOffset(1, json.Unmarshal(output, &deployment)).ShouldNot(HaveOccurred()) + return deployment } func IsProjectInfoExist(projectID string) bool { @@ -67,8 +67,8 @@ func IsProjectInfoExist(projectID string) bool { return 
session.ExitCode() == 0 } -func DeleteCluster(projectID, clusterName string) *Buffer { - session := cli.Execute("mongocli", "atlas", "cluster", "delete", clusterName, "--projectId", projectID, "--force") +func DeleteDeployment(projectID, deploymentName string) *Buffer { + session := cli.Execute("mongocli", "atlas", "cluster", "delete", deploymentName, "--projectId", projectID, "--force") return session.Wait().Out } @@ -83,9 +83,9 @@ func IsProjectExist(name string) bool { return false } -func IsClusterExist(projectID string, name string) bool { - clusters := GetClusters(projectID) - for _, c := range clusters { +func IsDeploymentExist(projectID string, name string) bool { + deployments := GetDeployments(projectID) + for _, c := range deployments { GinkgoWriter.Write([]byte(c.Name + "<->" + name + "\n")) if c.Name == name { return true @@ -94,8 +94,8 @@ func IsClusterExist(projectID string, name string) bool { return false } -func GetClusterStateName(projectID string, clusterName string) string { - result := GetClustersInfo(projectID, clusterName) +func GetDeploymentStateName(projectID string, deploymentName string) string { + result := GetDeploymentsInfo(projectID, deploymentName) return result.StateName } diff --git a/test/e2e/configuration_test.go b/test/e2e/configuration_test.go index cd6ff6fb9f..a8920699bc 100644 --- a/test/e2e/configuration_test.go +++ b/test/e2e/configuration_test.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" ) -var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns"), func() { +var _ = Describe("Configuration namespaced. Deploy deployment", Label("deployment-ns"), func() { var data model.TestDataProvider BeforeEach(func() { @@ -38,7 +38,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") ) actions.SaveTestAppLogs(data.Resources) actions.SaveK8sResources( - []string{"deploy", "atlasclusters", "atlasdatabaseusers", "atlasprojects"}, + []string{"deploy", "atlasdeployments", "atlasdatabaseusers", "atlasprojects"}, data.Resources.Namespace, ) actions.AfterEachFinalCleanup([]model.TestDataProvider{data}) @@ -55,7 +55,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-ns-trial", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -73,8 +73,8 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-ns-prodlike", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, - []string{"data/atlascluster_backup_update.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, + []string{"data/atlasdeployment_backup_update.yaml"}, []model.DBUser{ *model.NewDBUser("admin"). WithSecretRef("dbuser-admin-secret-u1"). @@ -85,9 +85,9 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") }, 30001, []func(*model.TestDataProvider){ - actions.UpdateClusterFromUpdateConfig, - actions.SuspendCluster, - actions.ReactivateCluster, + actions.UpdateDeploymentFromUpdateConfig, + actions.SuspendDeployment, + actions.ReactivateDeployment, actions.DeleteFirstUser, }, ), @@ -97,8 +97,8 @@ var _ = Describe("Configuration namespaced. 
Deploy cluster", Label("cluster-ns") "operator-ns-multiregion-aws", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_multiregion_aws.yaml"}, - []string{"data/atlascluster_multiregion_aws_update.yaml"}, + []string{"data/atlasdeployment_multiregion_aws.yaml"}, + []string{"data/atlasdeployment_multiregion_aws_update.yaml"}, []model.DBUser{ *model.NewDBUser("user1"). WithSecretRef("dbuser-secret-u1"). @@ -109,8 +109,8 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") }, 30003, []func(*model.TestDataProvider){ - actions.SuspendCluster, - actions.ReactivateCluster, + actions.SuspendDeployment, + actions.ReactivateDeployment, actions.DeleteFirstUser, }, ), @@ -120,7 +120,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-multiregion-azure", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - []string{"data/atlascluster_multiregion_azure.yaml"}, + []string{"data/atlasdeployment_multiregion_azure.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -138,7 +138,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-multiregion-gcp", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - []string{"data/atlascluster_multiregion_gcp.yaml"}, + []string{"data/atlasdeployment_multiregion_gcp.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -151,13 +151,13 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") }, ), ), - Entry("Product Owner - Simplest configuration with ProjectOwner and update cluster to have backup", Label("ns-owner"), + Entry("Product Owner - Simplest configuration with ProjectOwner and update Deployment to have backup", Label("ns-owner"), model.NewTestDataProvider( "operator-ns-product-owner", model.AProject{}, model.NewEmptyAtlasKeyType().WithRoles([]model.AtlasRoles{model.GroupOwner}).WithWhiteList([]string{"0.0.0.1/1", "128.0.0.0/1"}), - []string{"data/atlascluster_backup.yaml"}, - []string{"data/atlascluster_backup_update_remove_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, + []string{"data/atlasdeployment_backup_update_remove_backup.yaml"}, []model.DBUser{ *model.NewDBUser("user1"). WithSecretRef("dbuser-secret-u1"). @@ -165,7 +165,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") }, 30010, []func(*model.TestDataProvider){ - actions.UpdateClusterFromUpdateConfig, + actions.UpdateDeploymentFromUpdateConfig, }, ), ), @@ -174,7 +174,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-ns-trial-global", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -192,7 +192,7 @@ var _ = Describe("Configuration namespaced. Deploy cluster", Label("cluster-ns") "operator-ns-free", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic_free.yaml"}, + []string{"data/atlasdeployment_basic_free.yaml"}, []string{""}, []model.DBUser{ *model.NewDBUser("user"). @@ -210,7 +210,7 @@ var _ = Describe("Configuration namespaced. 
Deploy cluster", Label("cluster-ns") "operator-ns-free", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - []string{"data/atlascluster_basic_free.yaml"}, + []string{"data/atlasdeployment_basic_free.yaml"}, []string{""}, []model.DBUser{ *model.NewDBUser("user"). diff --git a/test/e2e/data/atlascluster_advanced_helm.yaml b/test/e2e/data/atlasdeployment_advanced_helm.yaml similarity index 85% rename from test/e2e/data/atlascluster_advanced_helm.yaml rename to test/e2e/data/atlasdeployment_advanced_helm.yaml index b94d1183b9..047f6fd3b3 100644 --- a/test/e2e/data/atlascluster_advanced_helm.yaml +++ b/test/e2e/data/atlasdeployment_advanced_helm.yaml @@ -1,13 +1,13 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlas-cluster-advanced + name: atlas-deployment-advanced spec: projectRef: name: my-project advancedDeploymentSpec: clusterType: REPLICASET - name: advanced-cluster + name: advanced-deployment replicationSpecs: - regionConfigs: - electableSpecs: diff --git a/test/e2e/data/atlascluster_advanced_multi_region_helm.yaml b/test/e2e/data/atlasdeployment_advanced_multi_region_helm.yaml similarity index 94% rename from test/e2e/data/atlascluster_advanced_multi_region_helm.yaml rename to test/e2e/data/atlasdeployment_advanced_multi_region_helm.yaml index cbc466cc8d..57653df6f1 100644 --- a/test/e2e/data/atlascluster_advanced_multi_region_helm.yaml +++ b/test/e2e/data/atlasdeployment_advanced_multi_region_helm.yaml @@ -1,13 +1,13 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlas-cluster-advanced + name: atlas-deployment-advanced spec: projectRef: name: my-project advancedDeploymentSpec: clusterType: GEOSHARDED - name: advanced-cluster + name: advanced-deployment replicationSpecs: - numShards: 1 zoneName: Zone1 diff --git a/test/e2e/data/atlascluster_backup.yaml b/test/e2e/data/atlasdeployment_backup.yaml similarity index 81% rename from test/e2e/data/atlascluster_backup.yaml rename to test/e2e/data/atlasdeployment_backup.yaml index 301d590c30..9bab3f836b 100644 --- a/test/e2e/data/atlascluster_backup.yaml +++ b/test/e2e/data/atlasdeployment_backup.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-backup + name: atlas-deployment-backup spec: projectRef: name: my-project deploymentSpec: - name: cluster-backup + name: deployment-backup providerBackupEnabled: true providerSettings: instanceSizeName: M10 diff --git a/test/e2e/data/atlascluster_backup_update.yaml b/test/e2e/data/atlasdeployment_backup_update.yaml similarity index 81% rename from test/e2e/data/atlascluster_backup_update.yaml rename to test/e2e/data/atlasdeployment_backup_update.yaml index 21f60433ee..5c789a1c22 100644 --- a/test/e2e/data/atlascluster_backup_update.yaml +++ b/test/e2e/data/atlasdeployment_backup_update.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-backup + name: atlas-deployment-backup spec: projectRef: name: my-project deploymentSpec: - name: cluster-backup + name: deployment-backup providerBackupEnabled: false providerSettings: instanceSizeName: M20 diff --git a/test/e2e/data/atlascluster_backup_update_remove_backup.yaml b/test/e2e/data/atlasdeployment_backup_update_remove_backup.yaml similarity index 81% rename from test/e2e/data/atlascluster_backup_update_remove_backup.yaml rename to test/e2e/data/atlasdeployment_backup_update_remove_backup.yaml index bf8a47c2a0..3a70b07189 100644 --- 
a/test/e2e/data/atlascluster_backup_update_remove_backup.yaml +++ b/test/e2e/data/atlasdeployment_backup_update_remove_backup.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-backup + name: atlas-deployment-backup spec: projectRef: name: my-project deploymentSpec: - name: cluster-backup + name: deployment-backup providerBackupEnabled: false providerSettings: instanceSizeName: M10 diff --git a/test/e2e/data/atlascluster_basic.yaml b/test/e2e/data/atlasdeployment_basic.yaml similarity index 81% rename from test/e2e/data/atlascluster_basic.yaml rename to test/e2e/data/atlasdeployment_basic.yaml index 4224103e4b..238f11e6ac 100644 --- a/test/e2e/data/atlascluster_basic.yaml +++ b/test/e2e/data/atlasdeployment_basic.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-basic + name: atlas-deployment-basic spec: projectRef: name: my-project deploymentSpec: - name: cluster-basic + name: deployment-basic providerSettings: instanceSizeName: M2 providerName: TENANT diff --git a/test/e2e/data/atlascluster_basic_free.yaml b/test/e2e/data/atlasdeployment_basic_free.yaml similarity index 80% rename from test/e2e/data/atlascluster_basic_free.yaml rename to test/e2e/data/atlasdeployment_basic_free.yaml index a950fa53c8..284fb4f9ac 100644 --- a/test/e2e/data/atlascluster_basic_free.yaml +++ b/test/e2e/data/atlasdeployment_basic_free.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-basic-free + name: atlas-deployment-basic-free spec: projectRef: name: my-project deploymentSpec: - name: cluster-basic + name: deployment-basic providerSettings: instanceSizeName: M0 providerName: TENANT diff --git a/test/e2e/data/atlascluster_basic_helm.yaml b/test/e2e/data/atlasdeployment_basic_helm.yaml similarity index 75% rename from test/e2e/data/atlascluster_basic_helm.yaml rename to test/e2e/data/atlasdeployment_basic_helm.yaml index 4aefb3b208..157d4b27db 100644 --- a/test/e2e/data/atlascluster_basic_helm.yaml +++ b/test/e2e/data/atlasdeployment_basic_helm.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-basic-helm + name: atlas-deployment-basic-helm spec: projectRef: name: my-project deploymentSpec: - name: atlascluster-basic-helm + name: atlas-deployment-basic-helm providerSettings: instanceSizeName: M10 providerName: AWS diff --git a/test/e2e/data/atlascluster_basic_update.yaml b/test/e2e/data/atlasdeployment_basic_update.yaml similarity index 81% rename from test/e2e/data/atlascluster_basic_update.yaml rename to test/e2e/data/atlasdeployment_basic_update.yaml index 17efac665a..8de14a9c4c 100644 --- a/test/e2e/data/atlascluster_basic_update.yaml +++ b/test/e2e/data/atlasdeployment_basic_update.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-basic + name: atlas-deployment-basic spec: projectRef: name: my-project deploymentSpec: - name: cluster-basic + name: deployment-basic providerSettings: instanceSizeName: M5 providerName: TENANT diff --git a/test/e2e/data/atlascluster_multiregion_aws.yaml b/test/e2e/data/atlasdeployment_multiregion_aws.yaml similarity index 88% rename from test/e2e/data/atlascluster_multiregion_aws.yaml rename to test/e2e/data/atlasdeployment_multiregion_aws.yaml index 8bea001406..b9bc941dcb 100644 --- a/test/e2e/data/atlascluster_multiregion_aws.yaml +++ b/test/e2e/data/atlasdeployment_multiregion_aws.yaml 
@@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-multiregion-aws + name: atlas-deployment-multiregion-aws spec: projectRef: name: my-project deploymentSpec: - name: cluster-multiregion-aws + name: deployment-multiregion-aws providerBackupEnabled: true clusterType: REPLICASET providerSettings: diff --git a/test/e2e/data/atlascluster_multiregion_aws_update.yaml b/test/e2e/data/atlasdeployment_multiregion_aws_update.yaml similarity index 88% rename from test/e2e/data/atlascluster_multiregion_aws_update.yaml rename to test/e2e/data/atlasdeployment_multiregion_aws_update.yaml index 2fc4f299a4..1171cf3fa7 100644 --- a/test/e2e/data/atlascluster_multiregion_aws_update.yaml +++ b/test/e2e/data/atlasdeployment_multiregion_aws_update.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-multiregion-aws + name: atlas-deployment-multiregion-aws spec: projectRef: name: my-project deploymentSpec: - name: cluster-multiregion-aws + name: deployment-multiregion-aws providerBackupEnabled: false clusterType: REPLICASET providerSettings: diff --git a/test/e2e/data/atlascluster_multiregion_azure.yaml b/test/e2e/data/atlasdeployment_multiregion_azure.yaml similarity index 88% rename from test/e2e/data/atlascluster_multiregion_azure.yaml rename to test/e2e/data/atlasdeployment_multiregion_azure.yaml index d160644036..8f457a6527 100644 --- a/test/e2e/data/atlascluster_multiregion_azure.yaml +++ b/test/e2e/data/atlasdeployment_multiregion_azure.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-multiregion-azure + name: atlas-deployment-multiregion-azure spec: projectRef: name: my-project deploymentSpec: - name: cluster-multiregion-azure + name: deployment-multiregion-azure providerBackupEnabled: true clusterType: REPLICASET providerSettings: diff --git a/test/e2e/data/atlascluster_multiregion_azure_update.yaml b/test/e2e/data/atlasdeployment_multiregion_azure_update.yaml similarity index 88% rename from test/e2e/data/atlascluster_multiregion_azure_update.yaml rename to test/e2e/data/atlasdeployment_multiregion_azure_update.yaml index 495c8ee438..8443e4dfd8 100644 --- a/test/e2e/data/atlascluster_multiregion_azure_update.yaml +++ b/test/e2e/data/atlasdeployment_multiregion_azure_update.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-multiregion-azure + name: atlas-deployment-multiregion-azure spec: projectRef: name: my-project deploymentSpec: - name: cluster-multiregion-azure + name: deployment-multiregion-azure providerBackupEnabled: true clusterType: REPLICASET providerSettings: diff --git a/test/e2e/data/atlascluster_multiregion_gcp.yaml b/test/e2e/data/atlasdeployment_multiregion_gcp.yaml similarity index 88% rename from test/e2e/data/atlascluster_multiregion_gcp.yaml rename to test/e2e/data/atlasdeployment_multiregion_gcp.yaml index 3f05614db5..ace578927f 100644 --- a/test/e2e/data/atlascluster_multiregion_gcp.yaml +++ b/test/e2e/data/atlasdeployment_multiregion_gcp.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-multiregion-gcp + name: atlas-deployment-multiregion-gcp spec: projectRef: name: my-project deploymentSpec: - name: cluster-multiregion-gcp + name: deployment-multiregion-gcp providerBackupEnabled: true clusterType: REPLICASET providerSettings: diff --git a/test/e2e/data/atlascluster_serverless.yaml 
b/test/e2e/data/atlasdeployment_serverless.yaml similarity index 88% rename from test/e2e/data/atlascluster_serverless.yaml rename to test/e2e/data/atlasdeployment_serverless.yaml index 3f65658ad0..3e03c74c8f 100644 --- a/test/e2e/data/atlascluster_serverless.yaml +++ b/test/e2e/data/atlasdeployment_serverless.yaml @@ -1,7 +1,7 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-serverless + name: atlas-deployment-serverless spec: projectRef: name: my-project diff --git a/test/e2e/data/atlascluster_standard.yaml b/test/e2e/data/atlasdeployment_standard.yaml similarity index 77% rename from test/e2e/data/atlascluster_standard.yaml rename to test/e2e/data/atlasdeployment_standard.yaml index 6bcdc173b2..27c416b416 100644 --- a/test/e2e/data/atlascluster_standard.yaml +++ b/test/e2e/data/atlasdeployment_standard.yaml @@ -1,12 +1,12 @@ apiVersion: atlas.mongodb.com/v1 kind: AtlasDeployment metadata: - name: atlascluster-standard + name: atlas-deployment-standard spec: projectRef: name: my-project deploymentSpec: - name: cluster-standard + name: deployment-standard providerSettings: instanceSizeName: M10 providerName: AWS diff --git a/test/e2e/helm_chart_test.go b/test/e2e/helm_chart_test.go index b2daa60141..cb498c3dbd 100644 --- a/test/e2e/helm_chart_test.go +++ b/test/e2e/helm_chart_test.go @@ -32,7 +32,7 @@ var _ = Describe("HELM charts", func() { }) _ = AfterEach(func() { - By("Atfer each.", func() { + By("After each.", func() { GinkgoWriter.Write([]byte("\n")) GinkgoWriter.Write([]byte("===============================================\n")) GinkgoWriter.Write([]byte("Operator namespace: " + data.Resources.Namespace + "\n")) @@ -63,18 +63,18 @@ var _ = Describe("HELM charts", func() { }) DescribeTable("Namespaced operators working only with its own namespace with different configuration", Label("helm-ns"), - func(test model.TestDataProvider, clusterType string) { // clusterType - probably will be moved later () + func(test model.TestDataProvider, deploymentType string) { // deploymentType - probably will be moved later () data = test GinkgoWriter.Println(data.Resources.KeyName) - switch clusterType { + switch deploymentType { case "advanced": - data.Resources.Clusters[0].Spec.AdvancedDeploymentSpec.Name = data.Resources.KeyName + data.Resources.Deployments[0].Spec.AdvancedDeploymentSpec.Name = data.Resources.KeyName case "serverless": - data.Resources.Clusters[0].Spec.ServerlessSpec.Name = data.Resources.KeyName + data.Resources.Deployments[0].Spec.ServerlessSpec.Name = data.Resources.KeyName default: - data.Resources.Clusters[0].Spec.DeploymentSpec.Name = data.Resources.KeyName + data.Resources.Deployments[0].Spec.DeploymentSpec.Name = data.Resources.KeyName } - data.Resources.Clusters[0].ObjectMeta.Name = data.Resources.KeyName + data.Resources.Deployments[0].ObjectMeta.Name = data.Resources.KeyName By("Install CRD", func() { helm.InstallCRD(data.Resources) @@ -82,23 +82,23 @@ var _ = Describe("HELM charts", func() { By("User use helm for deploying namespaces operator", func() { helm.InstallOperatorNamespacedSubmodule(data.Resources) }) - By("User deploy cluster by helm", func() { - helm.InstallClusterSubmodule(data.Resources) + By("User deploy the deployment via helm", func() { + helm.InstallDeploymentSubmodule(data.Resources) }) - waitClusterWithChecks(&data) + waitDeploymentWithChecks(&data) By("Additional check for the current data set", func() { for _, check := range data.Actions { check(&data) } }) - deleteClusterAndOperator(&data) + 
deleteDeploymentAndOperator(&data) }, Entry("Several actions with helm update", Label("helm-ns-flow"), model.NewTestDataProvider( "helm-ns", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic_helm.yaml"}, + []string{"data/atlasdeployment_basic_helm.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader"). @@ -115,12 +115,12 @@ var _ = Describe("HELM charts", func() { ), "default", ), - Entry("Advanced cluster by helm chart", + Entry("Advanced deployment by helm chart", model.NewTestDataProvider( "helm-advanced", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_advanced_helm.yaml"}, + []string{"data/atlasdeployment_advanced_helm.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). @@ -133,12 +133,12 @@ var _ = Describe("HELM charts", func() { ), "advanced", ), - Entry("Advanced multiregion cluster by helm chart", + Entry("Advanced multiregion deployment by helm chart", model.NewTestDataProvider( "helm-advanced-multiregion", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_advanced_multi_region_helm.yaml"}, + []string{"data/atlasdeployment_advanced_multi_region_helm.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). @@ -151,12 +151,12 @@ var _ = Describe("HELM charts", func() { ), "advanced", ), - Entry("Serverless cluster by helm chart", + Entry("Serverless deployment by helm chart", model.NewTestDataProvider( "helm-serverless", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_serverless.yaml"}, + []string{"data/atlasdeployment_serverless.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). @@ -173,12 +173,12 @@ var _ = Describe("HELM charts", func() { Describe("HELM charts.", Label("helm-wide"), func() { It("User can deploy operator namespaces by using HELM", func() { - By("User creates configuration for a new Project and Cluster", func() { + By("User creates configuration for a new Project and Deployment", func() { data = model.NewTestDataProvider( "helm-wide", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic_helm.yaml"}, + []string{"data/atlasdeployment_basic_helm.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). 
@@ -190,28 +190,28 @@ var _ = Describe("HELM charts", func() { []func(*model.TestDataProvider){}, ) // helm template has equal ObjectMeta.Name and Spec.Name - data.Resources.Clusters[0].ObjectMeta.Name = "cluster-from-helm-wide" - data.Resources.Clusters[0].Spec.DeploymentSpec.Name = "cluster-from-helm-wide" + data.Resources.Deployments[0].ObjectMeta.Name = "deployment-from-helm-wide" + data.Resources.Deployments[0].Spec.DeploymentSpec.Name = "deployment-from-helm-wide" }) By("User use helm for deploying operator", func() { helm.InstallOperatorWideSubmodule(data.Resources) }) - By("User deploy cluster by helm", func() { - helm.InstallClusterSubmodule(data.Resources) + By("User deploy deployment by helm", func() { + helm.InstallDeploymentSubmodule(data.Resources) }) - waitClusterWithChecks(&data) - deleteClusterAndOperator(&data) + waitDeploymentWithChecks(&data) + deleteDeploymentAndOperator(&data) }) }) Describe("HELM charts.", Label("helm-update"), func() { It("User deploy operator and later deploy new version of the Atlas operator", func() { - By("User creates configuration for a new Project, Cluster, DBUser", func() { + By("User creates configuration for a new Project, Deployment, DBUser", func() { data = model.NewTestDataProvider( "helm-upgrade", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic_helm.yaml"}, + []string{"data/atlasdeployment_basic_helm.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("admin"). @@ -223,55 +223,55 @@ var _ = Describe("HELM charts", func() { []func(*model.TestDataProvider){}, ) // helm template has equal ObjectMeta.Name and Spec.Name - data.Resources.Clusters[0].ObjectMeta.Name = "cluster-from-helm-upgrade" - data.Resources.Clusters[0].Spec.DeploymentSpec.Name = "cluster-from-helm-upgrade" + data.Resources.Deployments[0].ObjectMeta.Name = "deployment-from-helm-upgrade" + data.Resources.Deployments[0].Spec.DeploymentSpec.Name = "deployment-from-helm-upgrade" }) By("User use helm for last released version of operator and deploy his resouces", func() { helm.AddMongoDBRepo() helm.InstallOperatorNamespacedFromLatestRelease(data.Resources) - helm.InstallClusterRelease(data.Resources) - waitClusterWithChecks(&data) + helm.InstallDeploymentRelease(data.Resources) + waitDeploymentWithChecks(&data) }) By("User update new released operator", func() { backup := true - data.Resources.Clusters[0].Spec.DeploymentSpec.ProviderBackupEnabled = &backup + data.Resources.Deployments[0].Spec.DeploymentSpec.ProviderBackupEnabled = &backup actions.HelmUpgradeChartVersions(&data) actions.CheckUsersCanUseOldApp(&data) }) By("Delete Resources", func() { - deleteClusterAndOperator(&data) + deleteDeploymentAndOperator(&data) }) }) }) }) -func waitClusterWithChecks(data *model.TestDataProvider) { +func waitDeploymentWithChecks(data *model.TestDataProvider) { By("Wait creation until is done", func() { actions.WaitProject(data, "1") resource, err := kube.GetProjectResource(data) Expect(err).Should(BeNil()) data.Resources.ProjectID = resource.Status.ID - actions.WaitCluster(data.Resources, "1") + actions.WaitDeployment(data.Resources, "1") }) By("Check attributes", func() { - cluster := data.Resources.Clusters[0] + deployment := data.Resources.Deployments[0] switch { - case cluster.Spec.AdvancedDeploymentSpec != nil: + case deployment.Spec.AdvancedDeploymentSpec != nil: atlasClient, err := atlas.AClient() Expect(err).To(BeNil()) - advancedCluster, err := atlasClient.GetAdvancedDeployment(data.Resources.ProjectID, 
cluster.Spec.AdvancedDeploymentSpec.Name) + advancedDeployment, err := atlasClient.GetAdvancedDeployment(data.Resources.ProjectID, deployment.Spec.AdvancedDeploymentSpec.Name) Expect(err).To(BeNil()) - actions.CompareAdvancedDeploymentsSpec(cluster.Spec, *advancedCluster) - case cluster.Spec.ServerlessSpec != nil: + actions.CompareAdvancedDeploymentsSpec(deployment.Spec, *advancedDeployment) + case deployment.Spec.ServerlessSpec != nil: atlasClient, err := atlas.AClient() Expect(err).To(BeNil()) - serverlessInstance, err := atlasClient.GetServerlessInstance(data.Resources.ProjectID, cluster.Spec.ServerlessSpec.Name) + serverlessInstance, err := atlasClient.GetServerlessInstance(data.Resources.ProjectID, deployment.Spec.ServerlessSpec.Name) Expect(err).To(BeNil()) - actions.CompareServerlessSpec(cluster.Spec, *serverlessInstance) + actions.CompareServerlessSpec(deployment.Spec, *serverlessInstance) default: - uCluster := mongocli.GetClustersInfo(data.Resources.ProjectID, data.Resources.Clusters[0].Spec.DeploymentSpec.Name) - actions.CompareClustersSpec(cluster.Spec, uCluster) + uDeployment := mongocli.GetDeploymentsInfo(data.Resources.ProjectID, data.Resources.Deployments[0].Spec.DeploymentSpec.Name) + actions.CompareDeploymentsSpec(deployment.Spec, uDeployment) } }) @@ -287,15 +287,15 @@ func waitClusterWithChecks(data *model.TestDataProvider) { } } -func deleteClusterAndOperator(data *model.TestDataProvider) { - By("Check project, cluster does not exist", func() { - helm.Uninstall(data.Resources.Clusters[0].Spec.GetClusterName(), data.Resources.Namespace) +func deleteDeploymentAndOperator(data *model.TestDataProvider) { + By("Check project, deployment does not exist", func() { + helm.Uninstall(data.Resources.Deployments[0].Spec.GetDeploymentName(), data.Resources.Namespace) Eventually( func() bool { return mongocli.IsProjectInfoExist(data.Resources.ProjectID) }, "7m", "20s", - ).Should(BeFalse(), "Project and cluster should be deleted from Atlas") + ).Should(BeFalse(), "Project and deployment should be deleted from Atlas") }) By("Delete HELM releases", func() { diff --git a/test/e2e/integration_test.go b/test/e2e/integration_test.go index 93bfae7bbb..18f8488ccd 100644 --- a/test/e2e/integration_test.go +++ b/test/e2e/integration_test.go @@ -20,7 +20,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/model" ) -var _ = Describe("Configuration namespaced. Deploy cluster", Label("integration-ns"), func() { +var _ = Describe("Configuration namespaced. 
Deploy deployment", Label("integration-ns"), func() { var data model.TestDataProvider var key string diff --git a/test/e2e/model/atlas_key_type.go b/test/e2e/model/atlas_key_type.go index d0b5644407..5dc2f18a18 100644 --- a/test/e2e/model/atlas_key_type.go +++ b/test/e2e/model/atlas_key_type.go @@ -11,7 +11,7 @@ const ( OrgBillingAdmin AtlasRoles = "ORG_BILLING_ADMIN" OrgReadOnly AtlasRoles = "ORG_READ_ONLY" - GroupClusterManager AtlasRoles = "GROUP_CLUSTER_MANAGER" + GroupDeploymentManager AtlasRoles = "GROUP_CLUSTER_MANAGER" GroupDataAccessAdmin AtlasRoles = "GROUP_DATA_ACCESS_ADMIN" GropuDataAccessReadOnly AtlasRoles = "GROUP_DATA_ACCESS_READ_ONLY" GroupDataAccessReadWrite AtlasRoles = "GROUP_DATA_ACCESS_READ_WRITE" diff --git a/test/e2e/model/cluster.go b/test/e2e/model/cluster.go deleted file mode 100644 index 92167666d9..0000000000 --- a/test/e2e/model/cluster.go +++ /dev/null @@ -1,44 +0,0 @@ -package model - -import ( - "path/filepath" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1" - "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" -) - -type AC struct { - metav1.TypeMeta `json:",inline"` - ObjectMeta *metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ClusterSpec `json:"spec,omitempty"` -} - -type ClusterSpec v1.AtlasDeploymentSpec - -func (c ClusterSpec) GetClusterName() string { - if c.AdvancedDeploymentSpec != nil { - return c.AdvancedDeploymentSpec.Name - } - if c.ServerlessSpec != nil { - return c.ServerlessSpec.Name - } - return c.DeploymentSpec.Name -} - -// LoadUserClusterConfig load configuration into object -func LoadUserClusterConfig(path string) AC { - var config AC - utils.ReadInYAMLFileAndConvert(path, &config) - return config -} - -func (ac *AC) ClusterFileName(input UserInputs) string { - // return "data/cluster-" + ac.ObjectMeta.Name + "-" + ac.Spec.Project.Name + ".yaml" - return filepath.Dir(input.ProjectPath) + "/" + ac.ObjectMeta.Name + "-" + ac.Spec.Project.Name + ".yaml" -} - -func (ac *AC) GetClusterNameResource() string { - return "atlasdeployment.atlas.mongodb.com/" + ac.ObjectMeta.Name -} diff --git a/test/e2e/model/dataprovider.go b/test/e2e/model/dataprovider.go index 2fc6bfea78..ef5c457618 100644 --- a/test/e2e/model/dataprovider.go +++ b/test/e2e/model/dataprovider.go @@ -2,23 +2,23 @@ package model // Full Data set for the current test case type TestDataProvider struct { - ConfPaths []string // init clusters configuration + ConfPaths []string // init deployments configuration ConfUpdatePaths []string // update configuration - Resources UserInputs // struct of all user resoucers project,clusters,databaseusers + Resources UserInputs // struct of all user resoucers project,deployments,databaseusers Actions []func(*TestDataProvider) // additional actions for the current data set PortGroup int // ports for the test application starts from _ SkipAppConnectivityCheck bool } -func NewTestDataProvider(keyTestPrefix string, project AProject, r *AtlasKeyType, initClusterConfigs []string, updateClusterConfig []string, users []DBUser, portGroup int, actions []func(*TestDataProvider)) TestDataProvider { +func NewTestDataProvider(keyTestPrefix string, project AProject, r *AtlasKeyType, initDeploymentConfigs []string, updateDeploymentConfig []string, users []DBUser, portGroup int, actions []func(*TestDataProvider)) TestDataProvider { var data TestDataProvider - data.ConfPaths = initClusterConfigs - data.ConfUpdatePaths = updateClusterConfig + data.ConfPaths = initDeploymentConfigs + 
data.ConfUpdatePaths = updateDeploymentConfig data.Resources = NewUserInputs(keyTestPrefix, project, users, r) data.Actions = actions data.PortGroup = portGroup - for i := range initClusterConfigs { - data.Resources.Clusters = append(data.Resources.Clusters, LoadUserClusterConfig(data.ConfPaths[i])) + for i := range initDeploymentConfigs { + data.Resources.Deployments = append(data.Resources.Deployments, LoadUserDeploymentConfig(data.ConfPaths[i])) } return data } diff --git a/test/e2e/model/deployment.go b/test/e2e/model/deployment.go new file mode 100644 index 0000000000..410d937b55 --- /dev/null +++ b/test/e2e/model/deployment.go @@ -0,0 +1,44 @@ +package model + +import ( + "path/filepath" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1 "github.com/mongodb/mongodb-atlas-kubernetes/pkg/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" +) + +type AtlasDeployment struct { + metav1.TypeMeta `json:",inline"` + ObjectMeta *metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DeploymentSpec `json:"spec,omitempty"` +} + +type DeploymentSpec v1.AtlasDeploymentSpec + +func (spec DeploymentSpec) GetDeploymentName() string { + if spec.AdvancedDeploymentSpec != nil { + return spec.AdvancedDeploymentSpec.Name + } + if spec.ServerlessSpec != nil { + return spec.ServerlessSpec.Name + } + return spec.DeploymentSpec.Name +} + +// LoadUserDeploymentConfig load configuration into object +func LoadUserDeploymentConfig(path string) AtlasDeployment { + var config AtlasDeployment + utils.ReadInYAMLFileAndConvert(path, &config) + return config +} + +func (ad *AtlasDeployment) DeploymentFileName(input UserInputs) string { + // return "data/deployment-" + ac.ObjectMeta.Name + "-" + ac.Spec.Project.Name + ".yaml" + return filepath.Dir(input.ProjectPath) + "/" + ad.ObjectMeta.Name + "-" + ad.Spec.Project.Name + ".yaml" +} + +func (ad *AtlasDeployment) GetDeploymentNameResource() string { + return "atlasdeployment.atlas.mongodb.com/" + ad.ObjectMeta.Name +} diff --git a/test/e2e/model/user_input.go b/test/e2e/model/user_input.go index 778fec5fd9..d576b66afc 100644 --- a/test/e2e/model/user_input.go +++ b/test/e2e/model/user_input.go @@ -15,7 +15,7 @@ type UserInputs struct { KeyName string Namespace string ProjectPath string - Clusters []AC + Deployments []AtlasDeployment Users []DBUser Project *AProject } diff --git a/test/e2e/multinamespace_test.go b/test/e2e/multinamespace_test.go index 775893ad47..d14c2570e1 100644 --- a/test/e2e/multinamespace_test.go +++ b/test/e2e/multinamespace_test.go @@ -46,14 +46,14 @@ var _ = Describe("Users can use clusterwide configuration with limitation to wat }) }) - // (Consider Shared Clusters when E2E tests could conflict with each other) - It("Deploy cluster multinamespaced operator and create resources in each of them", func() { + // (Consider Shared Deployments when E2E tests could conflict with each other) + It("Deploy deployment multinamespaced operator and create resources in each of them", func() { By("Set up test data configuration", func() { watched1 := model.NewTestDataProvider( "multinamestace-watched1", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{}, 30013, @@ -63,7 +63,7 @@ var _ = Describe("Users can use clusterwide configuration with limitation to wat "multinamestace-watched-global", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - 
[]string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{}, 30013, @@ -73,7 +73,7 @@ var _ = Describe("Users can use clusterwide configuration with limitation to wat "multinamestace-notwatched", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{}, 30013, @@ -83,7 +83,7 @@ var _ = Describe("Users can use clusterwide configuration with limitation to wat "multinamestace-notwatched", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess().CreateAsGlobalLevelKey(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{}, 30013, @@ -92,7 +92,7 @@ var _ = Describe("Users can use clusterwide configuration with limitation to wat listData = []model.TestDataProvider{watched1, watchedGlobal, notWatched, notWatchedGlobal} watchedNamespace = []string{watched1.Resources.Namespace, watchedGlobal.Resources.Namespace} }) - By("User Install CRD, cluster multinamespace Operator", func() { + By("User Install CRD, deployment multinamespace Operator", func() { for i := range listData { actions.PrepareUsersConfigurations(&listData[i]) } diff --git a/test/e2e/openshift_test.go b/test/e2e/openshift_test.go index 1c1ee9abab..0a8fe45480 100644 --- a/test/e2e/openshift_test.go +++ b/test/e2e/openshift_test.go @@ -66,7 +66,7 @@ var _ = Describe("Openshift UI test", Label("openshift"), func() { "operator-in-openshift", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). 
diff --git a/test/e2e/operator_type_wide_test.go b/test/e2e/operator_type_wide_test.go index 07f246fad8..bdaac54e2d 100644 --- a/test/e2e/operator_type_wide_test.go +++ b/test/e2e/operator_type_wide_test.go @@ -15,13 +15,13 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/test/e2e/utils" ) -var _ = Describe("Users (Norton and Nimnul) can work with one Cluster wide operator", Label("cluster-wide"), func() { +var _ = Describe("Users (Norton and Nimnul) can work with one Deployment wide operator", Label("deployment-wide"), func() { var NortonData, NimnulData model.TestDataProvider - commonClusterName := "megacluster" + commonDeploymentName := "megadeployment" _ = BeforeEach(func() { Eventually(kubecli.GetVersionOutput()).Should(Say(K8sVersion)) - By("User Install CRD, cluster wide Operator", func() { + By("User Install CRD, deployment wide Operator", func() { Eventually(kubecli.Apply(DefaultDeployConfig)).Should( Say("customresourcedefinition.apiextensions.k8s.io/atlasdeployments.atlas.mongodb.com"), ) @@ -59,14 +59,14 @@ var _ = Describe("Users (Norton and Nimnul) can work with one Cluster wide opera }) }) - // (Consider Shared Clusters when E2E tests could conflict with each other) - It("Deploy cluster wide operator and create resources in each of them", func() { - By("Users can create clusters with the same name", func() { + // (Consider Shared Deployments when E2E tests could conflict with each other) + It("Deploy deployment wide operator and create resources in each of them", func() { + By("Users can create deployments with the same name", func() { NortonData = model.NewTestDataProvider( "norton-wide", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). @@ -81,7 +81,7 @@ var _ = Describe("Users (Norton and Nimnul) can work with one Cluster wide opera "nimnul-wide", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_basic.yaml"}, + []string{"data/atlasdeployment_basic.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("reader2"). 
@@ -92,10 +92,10 @@ var _ = Describe("Users (Norton and Nimnul) can work with one Cluster wide opera 30009, []func(*model.TestDataProvider){}, ) - NortonData.Resources.Clusters[0].ObjectMeta.Name = "norton-cluster" - NortonData.Resources.Clusters[0].Spec.DeploymentSpec.Name = commonClusterName - NimnulData.Resources.Clusters[0].ObjectMeta.Name = "nimnul-cluster" - NimnulData.Resources.Clusters[0].Spec.DeploymentSpec.Name = commonClusterName + NortonData.Resources.Deployments[0].ObjectMeta.Name = "norton-deployment" + NortonData.Resources.Deployments[0].Spec.DeploymentSpec.Name = commonDeploymentName + NimnulData.Resources.Deployments[0].ObjectMeta.Name = "nimnul-deployment" + NimnulData.Resources.Deployments[0].Spec.DeploymentSpec.Name = commonDeploymentName }) By("Deploy users resorces", func() { @@ -105,27 +105,27 @@ var _ = Describe("Users (Norton and Nimnul) can work with one Cluster wide opera actions.DeployUserResourcesAction(&NimnulData) }) - By("Operator working with right cluster if one of the user update configuration", func() { - NortonData.Resources.Clusters[0].Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "something", Value: "awesome"}} + By("Operator working with right deployment if one of the user update configuration", func() { + NortonData.Resources.Deployments[0].Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "something", Value: "awesome"}} utils.SaveToFile( - NortonData.Resources.Clusters[0].ClusterFileName(NortonData.Resources), - utils.JSONToYAMLConvert(NortonData.Resources.Clusters[0]), + NortonData.Resources.Deployments[0].DeploymentFileName(NortonData.Resources), + utils.JSONToYAMLConvert(NortonData.Resources.Deployments[0]), ) - kubecli.Apply(NortonData.Resources.Clusters[0].ClusterFileName(NortonData.Resources), "-n", NortonData.Resources.Namespace) - actions.WaitCluster(NortonData.Resources, "2") + kubecli.Apply(NortonData.Resources.Deployments[0].DeploymentFileName(NortonData.Resources), "-n", NortonData.Resources.Namespace) + actions.WaitDeployment(NortonData.Resources, "2") - By("Norton cluster has labels", func() { + By("Norton deployment has labels", func() { Expect( - kubecli.GetClusterResource(NortonData.Resources.Namespace, NortonData.Resources.Clusters[0].GetClusterNameResource()).Spec.DeploymentSpec.Labels[0], + kubecli.GetDeploymentResource(NortonData.Resources.Namespace, NortonData.Resources.Deployments[0].GetDeploymentNameResource()).Spec.DeploymentSpec.Labels[0], ).To(MatchFields(IgnoreExtras, Fields{ "Key": Equal("something"), "Value": Equal("awesome"), })) }) - By("Nimnul cluster does not have labels", func() { + By("Nimnul deployment does not have labels", func() { Eventually( - kubecli.GetClusterResource(NimnulData.Resources.Namespace, NimnulData.Resources.Clusters[0].GetClusterNameResource()).Spec.DeploymentSpec.Labels, + kubecli.GetDeploymentResource(NimnulData.Resources.Namespace, NimnulData.Resources.Deployments[0].GetDeploymentNameResource()).Spec.DeploymentSpec.Labels, ).Should(BeNil()) }) }) diff --git a/test/e2e/private_link_test.go b/test/e2e/private_link_test.go index b2a384e20a..a69e4ef13d 100644 --- a/test/e2e/private_link_test.go +++ b/test/e2e/private_link_test.go @@ -21,8 +21,8 @@ import ( ) // NOTES -// Feature unavailable in Free and Shared-Tier Clusters -// This feature is not available for M0 free clusters, M2, and M5 clusters. +// Feature unavailable in Free and Shared-Tier Deployments +// This feature is not available for M0 free deployments, M2, and M5 deployments. 
// tag for test resources "atlas-operator-test" (config.Tag) @@ -85,7 +85,7 @@ var _ = Describe("UserLogin", Label("privatelink"), func() { "privatelink-aws-1", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -107,7 +107,7 @@ var _ = Describe("UserLogin", Label("privatelink"), func() { "privatelink-azure-1", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -127,7 +127,7 @@ var _ = Describe("UserLogin", Label("privatelink"), func() { "privatelink-aws-2", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -153,7 +153,7 @@ var _ = Describe("UserLogin", Label("privatelink"), func() { "privatelink-aws-azure", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). @@ -183,7 +183,7 @@ var _ = Describe("UserLogin", Label("privatelink"), func() { "privatelink-gpc-1", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_backup.yaml"}, + []string{"data/atlasdeployment_backup.yaml"}, []string{}, []model.DBUser{ *model.NewDBUser("user1"). diff --git a/test/e2e/x509_test.go b/test/e2e/x509_test.go index afb5d55aaf..7742e54925 100644 --- a/test/e2e/x509_test.go +++ b/test/e2e/x509_test.go @@ -64,7 +64,7 @@ var _ = Describe("UserLogin", Label("x509auth"), func() { "x509auth", model.AProject{}, model.NewEmptyAtlasKeyType().UseDefaulFullAccess(), - []string{"data/atlascluster_standard.yaml"}, + []string{"data/atlasdeployment_standard.yaml"}, []string{}, []model.DBUser{}, 30000, diff --git a/test/int/clusterwide/dbuser_test.go b/test/int/clusterwide/dbuser_test.go index 1c91d52a21..2a0b90efbf 100644 --- a/test/int/clusterwide/dbuser_test.go +++ b/test/int/clusterwide/dbuser_test.go @@ -23,20 +23,20 @@ const ( DevMode = false UserPasswordSecret = "user-password-secret" DBUserPassword = "Passw0rd!" 
- // M2 clusters take longer time to apply changes + // M2 Deployments take longer time to apply changes DBUserUpdateTimeout = 170 ProjectCreationTimeout = 40 ) -var _ = Describe("ClusterWide", Label("int", "ClusterWide"), func() { +var _ = Describe("clusterwide", Label("int", "clusterwide"), func() { const interval = time.Second * 1 var ( - connectionSecret corev1.Secret - createdProject *mdbv1.AtlasProject - createdClusterAWS *mdbv1.AtlasDeployment - createdDBUser *mdbv1.AtlasDatabaseUser - secondDBUser *mdbv1.AtlasDatabaseUser + connectionSecret corev1.Secret + createdProject *mdbv1.AtlasProject + createdDeploymentAWS *mdbv1.AtlasDeployment + createdDBUser *mdbv1.AtlasDatabaseUser + secondDBUser *mdbv1.AtlasDatabaseUser ) BeforeEach(func() { @@ -80,7 +80,7 @@ var _ = Describe("ClusterWide", Label("int", "ClusterWide"), func() { Expect(k8sClient.List(context.Background(), &list, client.InNamespace(namespace.Name))).To(Succeed()) for i := range list.Items { - By("Removing Atlas Cluster " + list.Items[i].Name) + By("Removing Atlas Deployment " + list.Items[i].Name) Expect(k8sClient.Delete(context.Background(), &list.Items[i])).To(Succeed()) } for i := range list.Items { @@ -93,10 +93,10 @@ var _ = Describe("ClusterWide", Label("int", "ClusterWide"), func() { } }) - Describe("Create user and cluster in different namespaces", func() { + Describe("Create user and deployment in different namespaces", func() { It("Should Succeed", func() { - clusterNS := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace.Name + "-other-cluster"}} - Expect(k8sClient.Create(context.Background(), &clusterNS)).ToNot(HaveOccurred()) + deploymentNS := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace.Name + "-other-deployment"}} + Expect(k8sClient.Create(context.Background(), &deploymentNS)).ToNot(HaveOccurred()) userNS := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace.Name + "-other-user"}} Expect(k8sClient.Create(context.Background(), &userNS)).ToNot(HaveOccurred()) @@ -105,15 +105,15 @@ var _ = Describe("ClusterWide", Label("int", "ClusterWide"), func() { passwordSecret := buildPasswordSecret(userNS.Name, UserPasswordSecret, DBUserPassword) Expect(k8sClient.Create(context.Background(), &passwordSecret)).To(Succeed()) - createdClusterAWS = mdbv1.DefaultAWSCluster(clusterNS.Name, createdProject.Name).Lightweight() - // The project namespace is different from the cluster one - need to specify explicitly - createdClusterAWS.Spec.Project.Namespace = namespace.Name + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(deploymentNS.Name, createdProject.Name).Lightweight() + // The project namespace is different from the deployment one - need to specify explicitly + createdDeploymentAWS.Spec.Project.Namespace = namespace.Name - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) @@ -123,9 +123,9 @@ var _ = Describe("ClusterWide", Label("int", "ClusterWide"), func() { Eventually(testutil.WaitFor(k8sClient, createdDBUser, 
status.TrueCondition(status.ReadyType)), DBUserUpdateTimeout, interval, validateDatabaseUserUpdatingFunc()).Should(BeTrue()) - By("Removing the cluster", func() { - Expect(k8sClient.Delete(context.Background(), createdClusterAWS)).To(Succeed()) - Eventually(checkAtlasDeploymentRemoved(createdProject.ID(), createdClusterAWS.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) + By("Removing the deployment", func() { + Expect(k8sClient.Delete(context.Background(), createdDeploymentAWS)).To(Succeed()) + Eventually(checkAtlasDeploymentRemoved(createdProject.ID(), createdDeploymentAWS.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) }) }) }) @@ -164,9 +164,9 @@ func checkAtlasDatabaseUserRemoved(projectID string, user mdbv1.AtlasDatabaseUse } } -func checkAtlasDeploymentRemoved(projectID string, clusterName string) func() bool { +func checkAtlasDeploymentRemoved(projectID string, deploymentName string) func() bool { return func() bool { - _, r, err := atlasClient.Clusters.Get(context.Background(), projectID, clusterName) + _, r, err := atlasClient.Clusters.Get(context.Background(), projectID, deploymentName) if err != nil { if r != nil && r.StatusCode == http.StatusNotFound { return true @@ -189,7 +189,7 @@ func checkAtlasProjectRemoved(projectID string) func() bool { } } -func validateClusterCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResource) { +func validateDeploymentCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResource) { startedCreation := false return func(a mdbv1.AtlasCustomResource) { c := a.(*mdbv1.AtlasDeployment) @@ -200,21 +200,21 @@ func validateClusterCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResou if startedCreation { g.Expect(c.Status.StateName).To(Or(Equal("CREATING"), Equal("IDLE")), fmt.Sprintf("Current conditions: %+v", c.Status.Conditions)) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.ClusterReadyType).WithReason(string(workflow.ClusterCreating)).WithMessageRegexp("cluster is provisioning"), + status.FalseCondition(status.DeploymentReadyType).WithReason(string(workflow.DeploymentCreating)).WithMessageRegexp("deployment is provisioning"), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) g.Expect(c.Status.Conditions).To(ConsistOf(expectedConditionsMatchers)) } else { // Otherwise there could have been some exception in Atlas on creation - let's check the conditions - condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.ClusterReadyType) + condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.DeploymentReadyType) g.Expect(ok).To(BeFalse(), fmt.Sprintf("Unexpected condition: %v", condition)) } } } // nolint -func validateClusterCreatingFunc() func(a mdbv1.AtlasCustomResource) { +func validateDeploymentCreatingFunc() func(a mdbv1.AtlasCustomResource) { startedCreation := false return func(a mdbv1.AtlasCustomResource) { c := a.(*mdbv1.AtlasDeployment) @@ -225,14 +225,14 @@ func validateClusterCreatingFunc() func(a mdbv1.AtlasCustomResource) { if startedCreation { Expect(c.Status.StateName).To(Equal("CREATING"), fmt.Sprintf("Current conditions: %+v", c.Status.Conditions)) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.ClusterReadyType).WithReason(string(workflow.ClusterCreating)).WithMessageRegexp("cluster is provisioning"), + status.FalseCondition(status.DeploymentReadyType).WithReason(string(workflow.DeploymentCreating)).WithMessageRegexp("deployment is 
provisioning"), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) Expect(c.Status.Conditions).To(ConsistOf(expectedConditionsMatchers)) } else { // Otherwise there could have been some exception in Atlas on creation - let's check the conditions - condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.ClusterReadyType) + condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.DeploymentReadyType) Expect(ok).To(BeFalse(), fmt.Sprintf("Unexpected condition: %v", condition)) } } @@ -242,7 +242,7 @@ func validateDatabaseUserUpdatingFunc() func(a mdbv1.AtlasCustomResource) { return func(a mdbv1.AtlasCustomResource) { d := a.(*mdbv1.AtlasDatabaseUser) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserClustersAppliedChanges)), + status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserDeploymentAppliedChanges)), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) diff --git a/test/int/dbuser_test.go b/test/int/dbuser_test.go index c69047a4c0..5155ccc331 100644 --- a/test/int/dbuser_test.go +++ b/test/int/dbuser_test.go @@ -41,7 +41,7 @@ const ( DBUserPassword = "Passw0rd!" UserPasswordSecret2 = "second-user-password-secret" DBUserPassword2 = "H@lla#!" - // M2 clusters take longer time to apply changes + // M2 deployments take longer time to apply changes DBUserUpdateTimeout = time.Minute * 4 ) @@ -52,13 +52,13 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() ) var ( - connectionSecret corev1.Secret - createdProject *mdbv1.AtlasProject - createdClusterAWS *mdbv1.AtlasDeployment - createdClusterGCP *mdbv1.AtlasDeployment - createdClusterAzure *mdbv1.AtlasDeployment - createdDBUser *mdbv1.AtlasDatabaseUser - secondDBUser *mdbv1.AtlasDatabaseUser + connectionSecret corev1.Secret + createdProject *mdbv1.AtlasProject + createdDeploymentAWS *mdbv1.AtlasDeployment + createdDeploymentGCP *mdbv1.AtlasDeployment + createdDeploymentAzure *mdbv1.AtlasDeployment + createdDBUser *mdbv1.AtlasDatabaseUser + secondDBUser *mdbv1.AtlasDatabaseUser ) BeforeEach(func() { @@ -94,23 +94,23 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() AfterEach(func() { if DevMode { - // No tearDown in dev mode - projects and both clusters will stay in Atlas so it's easier to develop - // tests. Just rerun the test and the project + clusters in Atlas will be reused. + // No tearDown in dev mode - projects and both deployments will stay in Atlas so it's easier to develop + // tests. Just rerun the test and the project + deployments in Atlas will be reused. // We only need to wipe data in the databases. 
- if createdClusterAWS != nil { - dbClient, err := mongoClient(createdProject.ID(), *createdClusterAWS, *createdDBUser) + if createdDeploymentAWS != nil { + dbClient, err := mongoClient(createdProject.ID(), *createdDeploymentAWS, *createdDBUser) if err == nil { _ = dbClient.Database("test").Collection("operatortest").Drop(context.Background()) } } - if createdClusterGCP != nil { - dbClient, err := mongoClient(createdProject.ID(), *createdClusterGCP, *createdDBUser) + if createdDeploymentGCP != nil { + dbClient, err := mongoClient(createdProject.ID(), *createdDeploymentGCP, *createdDBUser) if err == nil { _ = dbClient.Database("test").Collection("operatortest").Drop(context.Background()) } } - if createdClusterAzure != nil { - dbClient, err := mongoClient(createdProject.ID(), *createdClusterAzure, *createdDBUser) + if createdDeploymentAzure != nil { + dbClient, err := mongoClient(createdProject.ID(), *createdDeploymentAzure, *createdDBUser) if err == nil { _ = dbClient.Database("test").Collection("operatortest").Drop(context.Background()) } @@ -130,11 +130,11 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(k8sClient.List(context.Background(), &list, client.InNamespace(namespace.Name))).To(Succeed()) for i := range list.Items { - By("Removing Atlas Cluster " + list.Items[i].Name) + By("Removing Atlas Deployment " + list.Items[i].Name) Expect(k8sClient.Delete(context.Background(), &list.Items[i])).To(Succeed()) } for i := range list.Items { - Eventually(checkAtlasDeploymentRemoved(createdProject.ID(), list.Items[i].GetClusterName()), 600, interval).Should(BeTrue()) + Eventually(checkAtlasDeploymentRemoved(createdProject.ID(), list.Items[i].GetDeploymentName()), 600, interval).Should(BeTrue()) } By("Removing Atlas Project " + createdProject.Status.ID) @@ -148,31 +148,31 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() return kube.NormalizeIdentifier(createdProject.Spec.Name) + suffix } - byCreatingDefaultAWSandAzureClusters := func() { - By("Creating clusters", func() { - createdClusterAWS = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + byCreatingDefaultAWSandAzureDeployments := func() { + By("Creating deployments", func() { + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) - createdClusterAzure = mdbv1.DefaultAzureCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAzure)).ToNot(HaveOccurred()) + createdDeploymentAzure = mdbv1.DefaultAzureDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAzure)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) - }).WithTimeout(ClusterUpdateTimeout).WithPolling(interval).Should(Succeed()) + }).WithTimeout(DeploymentUpdateTimeout).WithPolling(interval).Should(Succeed()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAzure, 
status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAzure, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) - }).WithTimeout(ClusterUpdateTimeout).WithPolling(interval).Should(Succeed()) + }).WithTimeout(DeploymentUpdateTimeout).WithPolling(interval).Should(Succeed()) }) } - Describe("Create/Update two users, two clusters", func() { + Describe("Create/Update two users, two deployments", func() { It("They should be created successfully", func() { - byCreatingDefaultAWSandAzureClusters() + byCreatingDefaultAWSandAzureDeployments() createdDBUser = mdbv1.DefaultDBUser(namespace.Name, "test-db-user", createdProject.Name).WithPasswordSecret(UserPasswordSecret) By(fmt.Sprintf("Creating the Database User %s", kube.ObjectKeyFromObject(createdDBUser)), func() { @@ -183,20 +183,20 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkUserInAtlas(createdProject.ID(), *createdDBUser) - Expect(tryConnect(createdProject.ID(), *createdClusterAzure, *createdDBUser)).Should(Succeed()) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAzure, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) By("Checking connection Secrets", func() { - validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) - validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 2) }) - By("Checking connectivity to Clusters", func() { + By("Checking connectivity to Deployments", func() { // The user created lacks read/write roles - err := tryWrite(createdProject.ID(), *createdClusterAzure, *createdDBUser, "test", "operatortest") + err := tryWrite(createdProject.ID(), *createdDeploymentAzure, *createdDBUser, "test", "operatortest") Expect(err).To(HaveOccurred()) Expect(err.Error()).To(MatchRegexp("user is not allowed")) - err = tryWrite(createdProject.ID(), *createdClusterAWS, *createdDBUser, "test", "operatortest") + err = tryWrite(createdProject.ID(), *createdDeploymentAWS, *createdDBUser, "test", "operatortest") Expect(err).To(HaveOccurred()) Expect(err.Error()).To(MatchRegexp("user is not allowed")) }) @@ -213,22 +213,22 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkUserInAtlas(createdProject.ID(), *createdDBUser) By("Checking connection Secrets", func() { - validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) - validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 2) }) - By("Checking write permissions for Clusters", func() { - Expect(tryWrite(createdProject.ID(), *createdClusterAzure, *createdDBUser, "test", "operatortest")).Should(Succeed()) - Expect(tryWrite(createdProject.ID(), *createdClusterAWS, *createdDBUser, "test", "operatortest")).Should(Succeed()) + By("Checking 
write permissions for Deployments", func() { + Expect(tryWrite(createdProject.ID(), *createdDeploymentAzure, *createdDBUser, "test", "operatortest")).Should(Succeed()) + Expect(tryWrite(createdProject.ID(), *createdDeploymentAWS, *createdDBUser, "test", "operatortest")).Should(Succeed()) }) }) - By("Adding second user for Azure cluster only (fails, wrong scope)", func() { + By("Adding second user for Azure deployment only (fails, wrong scope)", func() { secondDBUser = mdbv1.DefaultDBUser(namespace.Name, "second-db-user", createdProject.Name). WithPasswordSecret(UserPasswordSecret2). WithRole("readWrite", "someDB", "thisIsTheOnlyAllowedCollection"). - // Cluster doesn't exist - WithScope(mdbv1.ClusterScopeType, createdClusterAzure.GetClusterName()+"-foo") + // Deployment doesn't exist + WithScope(mdbv1.DeploymentScopeType, createdDeploymentAzure.GetDeploymentName()+"-foo") Expect(k8sClient.Create(context.Background(), secondDBUser)).ToNot(HaveOccurred()) @@ -239,19 +239,19 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() status. FalseCondition(status.DatabaseUserReadyType). WithReason(string(workflow.DatabaseUserInvalidSpec)). - WithMessageRegexp("such cluster doesn't exist in Atlas"), + WithMessageRegexp("such deployment doesn't exist in Atlas"), ), 20, intervalShort, ).Should(BeTrue()) }) By("Fixing second user", func() { - secondDBUser = secondDBUser.ClearScopes().WithScope(mdbv1.ClusterScopeType, createdClusterAzure.Spec.DeploymentSpec.Name) + secondDBUser = secondDBUser.ClearScopes().WithScope(mdbv1.DeploymentScopeType, createdDeploymentAzure.Spec.DeploymentSpec.Name) Expect(k8sClient.Update(context.Background(), secondDBUser)).ToNot(HaveOccurred()) - // First we need to wait for "such cluster doesn't exist in Atlas" error to be gone - Eventually(testutil.WaitFor(k8sClient, secondDBUser, status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserClustersAppliedChanges))), + // First we need to wait for "such deployment doesn't exist in Atlas" error to be gone + Eventually(testutil.WaitFor(k8sClient, secondDBUser, status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserDeploymentAppliedChanges))), 20, intervalShort).Should(BeTrue()) Eventually(testutil.WaitFor(k8sClient, secondDBUser, status.TrueCondition(status.ReadyType), validateDatabaseUserUpdatingFunc()), @@ -260,22 +260,22 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkUserInAtlas(createdProject.ID(), *secondDBUser) By("Checking connection Secrets", func() { - validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) - validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) - validateSecret(k8sClient, *createdProject, *createdClusterAzure, *secondDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *secondDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 3) }) - By("Checking write permissions for Clusters", func() { + By("Checking write permissions for Deployments", func() { // We still can write by the first user - Expect(tryWrite(createdProject.ID(), *createdClusterAzure, *createdDBUser, "test", "testCollection")).Should(Succeed()) - Expect(tryWrite(createdProject.ID(), *createdClusterAWS, *createdDBUser, "test", 
"testCollection")).Should(Succeed()) + Expect(tryWrite(createdProject.ID(), *createdDeploymentAzure, *createdDBUser, "test", "testCollection")).Should(Succeed()) + Expect(tryWrite(createdProject.ID(), *createdDeploymentAWS, *createdDBUser, "test", "testCollection")).Should(Succeed()) // The second user can eventually write to one collection only - Expect(tryConnect(createdProject.ID(), *createdClusterAzure, *secondDBUser)).Should(Succeed()) - Expect(tryWrite(createdProject.ID(), *createdClusterAzure, *secondDBUser, "someDB", "thisIsTheOnlyAllowedCollection")).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAzure, *secondDBUser)).Should(Succeed()) + Expect(tryWrite(createdProject.ID(), *createdDeploymentAzure, *secondDBUser, "someDB", "thisIsTheOnlyAllowedCollection")).Should(Succeed()) - err := tryWrite(createdProject.ID(), *createdClusterAzure, *secondDBUser, "test", "someNotAllowedCollection") + err := tryWrite(createdProject.ID(), *createdDeploymentAzure, *secondDBUser, "test", "someNotAllowedCollection") Expect(err).To(HaveOccurred()) Expect(err.Error()).To(MatchRegexp("user is not allowed")) }) @@ -283,7 +283,7 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(k8sClient.Delete(context.Background(), secondDBUser)).To(Succeed()) Eventually(checkAtlasDatabaseUserRemoved(createdProject.Status.ID, *secondDBUser), 50, interval).Should(BeTrue()) - secretNames := []string{connSecretname("-test-cluster-azure-second-db-user")} + secretNames := []string{connSecretname("-test-deployment-azure-second-db-user")} Eventually(checkSecretsDontExist(namespace.Name, secretNames), 50, interval).Should(BeTrue()) }) }) @@ -291,7 +291,7 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(k8sClient.Delete(context.Background(), createdDBUser)).To(Succeed()) Eventually(checkAtlasDatabaseUserRemoved(createdProject.Status.ID, *createdDBUser), 50, interval).Should(BeTrue()) - secretNames := []string{connSecretname("-test-cluster-aws-test-db-user"), connSecretname("-test-cluster-azure-test-db-user")} + secretNames := []string{connSecretname("-test-deployment-aws-test-db-user"), connSecretname("-test-deployment-azure-test-db-user")} Eventually(checkSecretsDontExist(namespace.Name, secretNames), 50, interval).Should(BeTrue()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 0) @@ -299,10 +299,10 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() }) }) - // Note, that this test doesn't work with "DevMode=true" as requires the cluster to get created - Describe("Check the reverse order of cluster-user creation (user - first, then - the cluster)", func() { + // Note, that this test doesn't work with "DevMode=true" as requires the deployment to get created + Describe("Check the reverse order of deployment-user creation (user - first, then - the deployment)", func() { It("Should succeed", func() { - // Here we create a database user first - then the cluster + // Here we create a database user first - then the deployment By("Creating database user", func() { createdDBUser = mdbv1.DefaultDBUser(namespace.Name, "test-db-user", createdProject.Name).WithPasswordSecret(UserPasswordSecret) @@ -314,24 +314,24 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkUserInAtlas(createdProject.ID(), *createdDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 0) }) - By("Creating cluster", func() { - createdClusterAWS = 
mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + By("Creating deployment", func() { + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) - // We don't wait for the full cluster creation - only when it has started the process + // We don't wait for the full deployment creation - only when it has started the process Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) - }).WithTimeout(ClusterUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) + }).WithTimeout(DeploymentUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) }) - By("Updating the database user while the cluster is being created", func() { + By("Updating the database user while the deployment is being created", func() { createdDBUser = createdDBUser.WithRole("read", "test", "somecollection") Expect(k8sClient.Update(context.Background(), createdDBUser)).To(Succeed()) - // DatabaseUser will wait for the cluster to get created. + // DatabaseUser will wait for the deployment to get created. Eventually(testutil.WaitFor(k8sClient, createdDBUser, status.TrueCondition(status.ReadyType)), - ClusterUpdateTimeout, interval).Should(BeTrue()) + DeploymentUpdateTimeout, interval).Should(BeTrue()) expectedConditionsMatchers := testutil.MatchConditions( status.TrueCondition(status.DatabaseUserReadyType), @@ -341,9 +341,9 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(createdDBUser.Status.Conditions).To(ConsistOf(expectedConditionsMatchers)) checkUserInAtlas(createdProject.ID(), *createdDBUser) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) By("Checking connection Secrets", func() { - validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 1) }) }) @@ -351,15 +351,15 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() }) Describe("Check the password Secret is watched", func() { It("Should succeed", func() { - By("Creating clusters", func() { - createdClusterAWS = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + By("Creating deployments", func() { + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) - 
}).WithTimeout(ClusterUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) + }).WithTimeout(DeploymentUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) }) createdDBUser = mdbv1.DefaultDBUser(namespace.Name, "test-db-user", createdProject.Name).WithPasswordSecret(UserPasswordSecret) var connSecretInitial corev1.Secret @@ -372,9 +372,9 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() DBUserUpdateTimeout, interval, validateDatabaseUserUpdatingFunc()).Should(BeTrue()) testutil.EventExists(k8sClient, createdDBUser, "Normal", "Ready", "") - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) - connSecretInitial = validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) + connSecretInitial = validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) Expect(k8sClient.Get(context.Background(), kube.ObjectKey(namespace.Name, UserPasswordSecret), &pwdSecret)).To(Succeed()) Expect(createdDBUser.Status.PasswordVersion).To(Equal(pwdSecret.ResourceVersion)) }) @@ -395,7 +395,7 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() DBUserUpdateTimeout, interval, validateDatabaseUserUpdatingFunc()).Should(BeTrue()) // We need to make sure that the new connection secret is different from the initial one - connSecretUpdated := validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) + connSecretUpdated := validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) Expect(string(connSecretInitial.Data["password"])).To(Equal(DBUserPassword)) Expect(string(connSecretUpdated.Data["password"])).To(Equal("someNewPassw00rd")) @@ -404,13 +404,13 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(updatedPwdSecret.ResourceVersion).NotTo(Equal(pwdSecret.ResourceVersion)) Expect(createdDBUser.Status.PasswordVersion).To(Equal(updatedPwdSecret.ResourceVersion)) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) }) }) }) Describe("Change database users (make sure all stale secrets are removed)", func() { It("Should succeed", func() { - byCreatingDefaultAWSandAzureClusters() + byCreatingDefaultAWSandAzureDeployments() createdDBUser = mdbv1.DefaultDBUser(namespace.Name, "test-db-user", createdProject.Name).WithPasswordSecret(UserPasswordSecret) By(fmt.Sprintf("Creating the Database User %s (no scopes)", kube.ObjectKeyFromObject(createdDBUser)), func() { @@ -421,11 +421,11 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkNumberOfConnectionSecrets(k8sClient, *createdProject, 2) - s1 := validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) - s2 := validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) + s1 := validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) + s2 := validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) testutil.EventExists(k8sClient, createdDBUser, "Normal", atlasdatabaseuser.ConnectionSecretsEnsuredEvent, - fmt.Sprintf("Connection Secrets were created/updated: %s, %s", s1.Name, s2.Name)) + fmt.Sprintf("Connection Secrets were created/updated: (%s|%s|, ){3}", s1.Name, s2.Name)) 
}) By("Changing the db user name - two stale secret are expected to be removed, two added instead", func() { oldName := createdDBUser.Spec.Username @@ -441,40 +441,40 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Expect(err).To(HaveOccurred()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 2) - secret := validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) - Expect(secret.Name).To(Equal(connSecretname("-test-cluster-azure-new-user"))) - secret = validateSecret(k8sClient, *createdProject, *createdClusterAWS, *createdDBUser) - Expect(secret.Name).To(Equal(connSecretname("-test-cluster-aws-new-user"))) + secret := validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) + Expect(secret.Name).To(Equal(connSecretname("-test-deployment-azure-new-user"))) + secret = validateSecret(k8sClient, *createdProject, *createdDeploymentAWS, *createdDBUser) + Expect(secret.Name).To(Equal(connSecretname("-test-deployment-aws-new-user"))) - Expect(tryConnect(createdProject.ID(), *createdClusterAzure, *createdDBUser)).Should(Succeed()) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAzure, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) }) By("Changing the scopes - one stale secret is expected to be removed", func() { - createdDBUser = createdDBUser.ClearScopes().WithScope(mdbv1.ClusterScopeType, createdClusterAzure.Spec.DeploymentSpec.Name) + createdDBUser = createdDBUser.ClearScopes().WithScope(mdbv1.DeploymentScopeType, createdDeploymentAzure.Spec.DeploymentSpec.Name) Expect(k8sClient.Update(context.Background(), createdDBUser)).To(Succeed()) Eventually(testutil.WaitFor(k8sClient, createdDBUser, status.TrueCondition(status.ReadyType)), DBUserUpdateTimeout, interval, validateDatabaseUserUpdatingFunc()).Should(BeTrue()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 1) - validateSecret(k8sClient, *createdProject, *createdClusterAzure, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeploymentAzure, *createdDBUser) - Expect(tryConnect(createdProject.ID(), *createdClusterAzure, *createdDBUser)).Should(Succeed()) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).ShouldNot(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAzure, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).ShouldNot(Succeed()) }) }) }) Describe("Check the user expiration", func() { It("Should succeed", func() { - By("Creating a AWS cluster", func() { - createdClusterAWS = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).To(Succeed()) + By("Creating a AWS deployment", func() { + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).To(Succeed()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) - 
}).WithTimeout(ClusterUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) + }).WithTimeout(DeploymentUpdateTimeout).WithPolling(intervalShort).Should(Succeed()) }) By("Creating the expired Database User - no user created in Atlas", func() { @@ -504,7 +504,7 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() checkUserInAtlas(createdProject.ID(), *createdDBUser) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 1) - Expect(tryConnect(createdProject.ID(), *createdClusterAWS, *createdDBUser)).Should(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeploymentAWS, *createdDBUser)).Should(Succeed()) }) By("Extending the expiration", func() { after := time.Now().UTC().Add(time.Hour * 30).Format("2006-01-02T15:04:05") @@ -537,12 +537,12 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() Describe("Deleting the db user (not cleaning Atlas)", func() { It("Should Succeed", func() { By(`Creating the db user with retention policy "keep" first`, func() { - createdClusterAWS = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) @@ -567,11 +567,11 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() It("Should Succeed", func() { By(`Creating the user with reconciliation policy "skip" first`, func() { - createdClusterAWS = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdClusterAWS)).ToNot(HaveOccurred()) + createdDeploymentAWS = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeploymentAWS)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdClusterAWS, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeploymentAWS, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) @@ -604,7 +604,7 @@ var _ = Describe("AtlasDatabaseUser", Label("int", "AtlasDatabaseUser"), func() return false } - Eventually(testutil.WaitForAtlasDatabaseUserStateToNotBeReached(ctx, atlasClient, "admin", createdProject.Name, createdClusterAWS.GetClusterName(), containsDatabaseUser)) + Eventually(testutil.WaitForAtlasDatabaseUserStateToNotBeReached(ctx, atlasClient, "admin", createdProject.Name, createdDeploymentAWS.GetDeploymentName(), containsDatabaseUser)) }) }) }) @@ -649,15 +649,15 @@ func normalize(user mongodbatlas.DatabaseUser, projectID string) mongodbatlas.Da return user } -func tryConnect(projectID string, cluster mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) 
error { - _, err := mongoClient(projectID, cluster, user) +func tryConnect(projectID string, deployment mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) error { + _, err := mongoClient(projectID, deployment, user) return err } -func mongoClient(projectID string, cluster mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) (*mongo.Client, error) { +func mongoClient(projectID string, deployment mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) (*mongo.Client, error) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - c, _, err := atlasClient.Clusters.Get(context.Background(), projectID, cluster.Spec.DeploymentSpec.Name) + c, _, err := atlasClient.Clusters.Get(context.Background(), projectID, deployment.Spec.DeploymentSpec.Name) Expect(err).NotTo(HaveOccurred()) if c.ConnectionStrings == nil { @@ -685,8 +685,8 @@ type Person struct { Age int `json:"age"` } -func tryWrite(projectID string, cluster mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser, dbName, collectionName string) error { - dbClient, err := mongoClient(projectID, cluster, user) +func tryWrite(projectID string, deployment mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser, dbName, collectionName string) error { + dbClient, err := mongoClient(projectID, deployment, user) Expect(err).NotTo(HaveOccurred()) defer func() { if err = dbClient.Disconnect(context.Background()); err != nil { @@ -713,21 +713,21 @@ func tryWrite(projectID string, cluster mdbv1.AtlasDeployment, user mdbv1.AtlasD Expect(err).NotTo(HaveOccurred()) // Shouldn't return the error - by this step the roles should be propagated Expect(s).To(Equal(p)) - fmt.Fprintf(GinkgoWriter, "User %s (cluster %s) has inserted a single document to %s/%s\n", user.Spec.Username, cluster.GetClusterName(), dbName, collectionName) + fmt.Fprintf(GinkgoWriter, "User %s (deployment %s) has inserted a single document to %s/%s\n", user.Spec.Username, deployment.GetDeploymentName(), dbName, collectionName) return nil } -func validateSecret(k8sClient client.Client, project mdbv1.AtlasProject, cluster mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) corev1.Secret { +func validateSecret(k8sClient client.Client, project mdbv1.AtlasProject, deployment mdbv1.AtlasDeployment, user mdbv1.AtlasDatabaseUser) corev1.Secret { secret := corev1.Secret{} username := user.Spec.Username - secretName := fmt.Sprintf("%s-%s-%s", kube.NormalizeIdentifier(project.Spec.Name), kube.NormalizeIdentifier(cluster.GetClusterName()), kube.NormalizeIdentifier(username)) + secretName := fmt.Sprintf("%s-%s-%s", kube.NormalizeIdentifier(project.Spec.Name), kube.NormalizeIdentifier(deployment.GetDeploymentName()), kube.NormalizeIdentifier(username)) Expect(k8sClient.Get(context.Background(), kube.ObjectKey(project.Namespace, secretName), &secret)).To(Succeed()) GinkgoWriter.Write([]byte(fmt.Sprintf("!! 
Secret: %v (%v)\n", kube.ObjectKey(project.Namespace, secretName), secret.Namespace+"/"+secret.Name))) password, err := user.ReadPassword(k8sClient) Expect(err).NotTo(HaveOccurred()) - c, _, err := atlasClient.Clusters.Get(context.Background(), project.ID(), cluster.Spec.DeploymentSpec.Name) + c, _, err := atlasClient.Clusters.Get(context.Background(), project.ID(), deployment.Spec.DeploymentSpec.Name) Expect(err).NotTo(HaveOccurred()) expectedData := map[string][]byte{ @@ -740,7 +740,7 @@ func validateSecret(k8sClient client.Client, project mdbv1.AtlasProject, cluster } expectedLabels := map[string]string{ "atlas.mongodb.com/project-id": project.ID(), - "atlas.mongodb.com/cluster-name": cluster.GetClusterName(), + "atlas.mongodb.com/cluster-name": deployment.GetDeploymentName(), connectionsecret.TypeLabelKey: connectionsecret.CredLabelVal, } Expect(secret.Data).To(Equal(expectedData)) @@ -814,7 +814,7 @@ func validateDatabaseUserUpdatingFunc() func(a mdbv1.AtlasCustomResource) { return func(a mdbv1.AtlasCustomResource) { d := a.(*mdbv1.AtlasDatabaseUser) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserClustersAppliedChanges)), + status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserDeploymentAppliedChanges)), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) @@ -828,18 +828,18 @@ func validateDatabaseUserWaitingForCluster() func(a mdbv1.AtlasCustomResource) { d := a.(*mdbv1.AtlasDatabaseUser) // this is the first status that db user gets after update userChangesApplied := testutil.MatchConditions( - status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserClustersAppliedChanges)), + status.FalseCondition(status.DatabaseUserReadyType).WithReason(string(workflow.DatabaseUserDeploymentAppliedChanges)), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) - // this is the status the db user gets to when tries to create connection secrets and sees that the cluster + // this is the status the db user gets to when tries to create connection secrets and sees that the deployment // is not ready - waitingForCluster := testutil.MatchConditions( + waitingForDeployment := testutil.MatchConditions( status.FalseCondition(status.DatabaseUserReadyType). WithReason(string(workflow.DatabaseUserConnectionSecretsNotCreated)). - WithMessageRegexp("Waiting for clusters to get created/updated"), + WithMessageRegexp("Waiting for deployments to get created/updated"), status.FalseCondition(status.ReadyType), ) - Expect(d.Status.Conditions).To(Or(ConsistOf(waitingForCluster), ConsistOf(userChangesApplied))) + Expect(d.Status.Conditions).To(Or(ConsistOf(waitingForDeployment), ConsistOf(userChangesApplied))) } } diff --git a/test/int/deployment_test.go b/test/int/deployment_test.go index 37b4403a3f..2cd8b530e8 100644 --- a/test/int/deployment_test.go +++ b/test/int/deployment_test.go @@ -29,10 +29,10 @@ import ( ) const ( - // Set this to true if you are debugging cluster creation. + // Set this to true if you are debugging deployment creation. // This may not help much if there was the update though... 
- ClusterDevMode = false - ClusterUpdateTimeout = 40 * time.Minute + DeploymentDevMode = false + DeploymentUpdateTimeout = 40 * time.Minute ) var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { @@ -42,17 +42,17 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { ) var ( - connectionSecret corev1.Secret - createdProject *mdbv1.AtlasProject - createdCluster *mdbv1.AtlasDeployment - lastGeneration int64 - manualDeletion bool + connectionSecret corev1.Secret + createdProject *mdbv1.AtlasProject + createdDeployment *mdbv1.AtlasDeployment + lastGeneration int64 + manualDeletion bool ) BeforeEach(func() { prepareControllers() - createdCluster = &mdbv1.AtlasDeployment{} + createdDeployment = &mdbv1.AtlasDeployment{} lastGeneration = 0 manualDeletion = false @@ -71,7 +71,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { Expect(k8sClient.Create(context.Background(), &connectionSecret)).To(Succeed()) createdProject = mdbv1.DefaultProject(namespace.Name, connectionSecret.Name).WithIPAccessList(project.NewIPAccessList().WithIP("0.0.0.0/0")) - if ClusterDevMode { + if DeploymentDevMode { // While developing tests we need to reuse the same project createdProject.Spec.Name = "dev-test atlas-project" } @@ -82,24 +82,24 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }) AfterEach(func() { - if ClusterDevMode { + if DeploymentDevMode { return } if manualDeletion && createdProject != nil { - By("Deleting the cluster in Atlas manually", func() { - // We need to remove the cluster in Atlas manually to let project get removed - _, err := atlasClient.Clusters.Delete(context.Background(), createdProject.ID(), createdCluster.Spec.DeploymentSpec.Name) + By("Deleting the deployment in Atlas manually", func() { + // We need to remove the deployment in Atlas manually to let project get removed + _, err := atlasClient.Clusters.Delete(context.Background(), createdProject.ID(), createdDeployment.Spec.DeploymentSpec.Name) Expect(err).NotTo(HaveOccurred()) - Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) - createdCluster = nil + Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdDeployment.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) + createdDeployment = nil }) } if createdProject != nil && createdProject.Status.ID != "" { - if createdCluster != nil { - By("Removing Atlas Cluster " + createdCluster.Name) - Expect(k8sClient.Delete(context.Background(), createdCluster)).To(Succeed()) + if createdDeployment != nil { + By("Removing Atlas Deployment " + createdDeployment.Name) + Expect(k8sClient.Delete(context.Background(), createdDeployment)).To(Succeed()) - Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdCluster.GetClusterName()), 600, interval).Should(BeTrue()) + Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdDeployment.GetDeploymentName()), 600, interval).Should(BeTrue()) } By("Removing Atlas Project " + createdProject.Status.ID) @@ -109,105 +109,105 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { removeControllersAndNamespace() }) - doRegularClusterStatusChecks := func() { - By("Checking observed Cluster state", func() { - atlasCluster, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name) + doRegularDeploymentStatusChecks := 
func() { + By("Checking observed Deployment state", func() { + atlasDeployment, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.Status.ID, createdDeployment.Spec.DeploymentSpec.Name) Expect(err).ToNot(HaveOccurred()) - Expect(createdCluster.Status.ConnectionStrings).NotTo(BeNil()) - Expect(createdCluster.Status.ConnectionStrings.Standard).To(Equal(atlasCluster.ConnectionStrings.Standard)) - Expect(createdCluster.Status.ConnectionStrings.StandardSrv).To(Equal(atlasCluster.ConnectionStrings.StandardSrv)) - Expect(createdCluster.Status.MongoDBVersion).To(Equal(atlasCluster.MongoDBVersion)) - Expect(createdCluster.Status.MongoURIUpdated).To(Equal(atlasCluster.MongoURIUpdated)) - Expect(createdCluster.Status.StateName).To(Equal("IDLE")) - Expect(createdCluster.Status.Conditions).To(HaveLen(3)) - Expect(createdCluster.Status.Conditions).To(ConsistOf(testutil.MatchConditions( - status.TrueCondition(status.ClusterReadyType), + Expect(createdDeployment.Status.ConnectionStrings).NotTo(BeNil()) + Expect(createdDeployment.Status.ConnectionStrings.Standard).To(Equal(atlasDeployment.ConnectionStrings.Standard)) + Expect(createdDeployment.Status.ConnectionStrings.StandardSrv).To(Equal(atlasDeployment.ConnectionStrings.StandardSrv)) + Expect(createdDeployment.Status.MongoDBVersion).To(Equal(atlasDeployment.MongoDBVersion)) + Expect(createdDeployment.Status.MongoURIUpdated).To(Equal(atlasDeployment.MongoURIUpdated)) + Expect(createdDeployment.Status.StateName).To(Equal("IDLE")) + Expect(createdDeployment.Status.Conditions).To(HaveLen(3)) + Expect(createdDeployment.Status.Conditions).To(ConsistOf(testutil.MatchConditions( + status.TrueCondition(status.DeploymentReadyType), status.TrueCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ))) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(createdCluster.Generation)) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(createdDeployment.Generation)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) }) } doAdvancedDeploymentStatusChecks := func() { - By("Checking observed Advanced Cluster state", func() { - atlasCluster, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.Status.ID, createdCluster.Spec.AdvancedDeploymentSpec.Name) + By("Checking observed Advanced Deployment state", func() { + atlasDeployment, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.Status.ID, createdDeployment.Spec.AdvancedDeploymentSpec.Name) Expect(err).ToNot(HaveOccurred()) - Expect(createdCluster.Status.ConnectionStrings).NotTo(BeNil()) - Expect(createdCluster.Status.ConnectionStrings.Standard).To(Equal(atlasCluster.ConnectionStrings.Standard)) - Expect(createdCluster.Status.ConnectionStrings.StandardSrv).To(Equal(atlasCluster.ConnectionStrings.StandardSrv)) - Expect(createdCluster.Status.MongoDBVersion).To(Equal(atlasCluster.MongoDBVersion)) - Expect(createdCluster.Status.StateName).To(Equal("IDLE")) - Expect(createdCluster.Status.Conditions).To(HaveLen(3)) - Expect(createdCluster.Status.Conditions).To(ConsistOf(testutil.MatchConditions( - status.TrueCondition(status.ClusterReadyType), + Expect(createdDeployment.Status.ConnectionStrings).NotTo(BeNil()) + Expect(createdDeployment.Status.ConnectionStrings.Standard).To(Equal(atlasDeployment.ConnectionStrings.Standard)) + 
Expect(createdDeployment.Status.ConnectionStrings.StandardSrv).To(Equal(atlasDeployment.ConnectionStrings.StandardSrv)) + Expect(createdDeployment.Status.MongoDBVersion).To(Equal(atlasDeployment.MongoDBVersion)) + Expect(createdDeployment.Status.StateName).To(Equal("IDLE")) + Expect(createdDeployment.Status.Conditions).To(HaveLen(3)) + Expect(createdDeployment.Status.Conditions).To(ConsistOf(testutil.MatchConditions( + status.TrueCondition(status.DeploymentReadyType), status.TrueCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ))) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(createdCluster.Generation)) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(createdDeployment.Generation)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) }) } - doServerlessClusterStatusChecks := func() { + doServerlessDeploymentStatusChecks := func() { By("Checking observed Serverless state", func() { - atlasCluster, _, err := atlasClient.ServerlessInstances.Get(context.Background(), createdProject.Status.ID, createdCluster.Spec.ServerlessSpec.Name) + atlasDeployment, _, err := atlasClient.ServerlessInstances.Get(context.Background(), createdProject.Status.ID, createdDeployment.Spec.ServerlessSpec.Name) Expect(err).ToNot(HaveOccurred()) - Expect(createdCluster.Status.ConnectionStrings).NotTo(BeNil()) - Expect(createdCluster.Status.ConnectionStrings.Standard).To(Equal(atlasCluster.ConnectionStrings.Standard)) - Expect(createdCluster.Status.ConnectionStrings.StandardSrv).To(Equal(atlasCluster.ConnectionStrings.StandardSrv)) - Expect(createdCluster.Status.MongoDBVersion).To(Not(BeEmpty())) - Expect(createdCluster.Status.StateName).To(Equal("IDLE")) - Expect(createdCluster.Status.Conditions).To(HaveLen(3)) - Expect(createdCluster.Status.Conditions).To(ConsistOf(testutil.MatchConditions( - status.TrueCondition(status.ClusterReadyType), + Expect(createdDeployment.Status.ConnectionStrings).NotTo(BeNil()) + Expect(createdDeployment.Status.ConnectionStrings.Standard).To(Equal(atlasDeployment.ConnectionStrings.Standard)) + Expect(createdDeployment.Status.ConnectionStrings.StandardSrv).To(Equal(atlasDeployment.ConnectionStrings.StandardSrv)) + Expect(createdDeployment.Status.MongoDBVersion).To(Not(BeEmpty())) + Expect(createdDeployment.Status.StateName).To(Equal("IDLE")) + Expect(createdDeployment.Status.Conditions).To(HaveLen(3)) + Expect(createdDeployment.Status.Conditions).To(ConsistOf(testutil.MatchConditions( + status.TrueCondition(status.DeploymentReadyType), status.TrueCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ))) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(createdCluster.Generation)) - Expect(createdCluster.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(createdDeployment.Generation)) + Expect(createdDeployment.Status.ObservedGeneration).To(Equal(lastGeneration + 1)) }) } checkAtlasState := func(additionalChecks ...func(c *mongodbatlas.Cluster)) { - By("Verifying Cluster state in Atlas", func() { - atlasCluster, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name) + By("Verifying Deployment state in Atlas", func() { + atlasDeployment, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.Status.ID, 
createdDeployment.Spec.DeploymentSpec.Name) Expect(err).ToNot(HaveOccurred()) - mergedCluster, err := atlasdeployment.MergedCluster(*atlasCluster, createdCluster.Spec) + mergedDeployment, err := atlasdeployment.MergedDeployment(*atlasDeployment, createdDeployment.Spec) Expect(err).ToNot(HaveOccurred()) - Expect(atlasdeployment.ClustersEqual(zap.S(), *atlasCluster, mergedCluster)).To(BeTrue()) + Expect(atlasdeployment.DeploymentsEqual(zap.S(), *atlasDeployment, mergedDeployment)).To(BeTrue()) for _, check := range additionalChecks { - check(atlasCluster) + check(atlasDeployment) } }) } checkAdvancedAtlasState := func(additionalChecks ...func(c *mongodbatlas.AdvancedCluster)) { - By("Verifying Cluster state in Atlas", func() { - atlasCluster, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.Status.ID, createdCluster.Spec.AdvancedDeploymentSpec.Name) + By("Verifying Deployment state in Atlas", func() { + atlasDeployment, _, err := atlasClient.AdvancedClusters.Get(context.Background(), createdProject.Status.ID, createdDeployment.Spec.AdvancedDeploymentSpec.Name) Expect(err).ToNot(HaveOccurred()) - mergedCluster, err := atlasdeployment.MergedAdvancedDeployment(*atlasCluster, createdCluster.Spec) + mergedDeployment, err := atlasdeployment.MergedAdvancedDeployment(*atlasDeployment, createdDeployment.Spec) Expect(err).ToNot(HaveOccurred()) - Expect(atlasdeployment.AdvancedDeploymentsEqual(zap.S(), *atlasCluster, mergedCluster)).To(BeTrue()) + Expect(atlasdeployment.AdvancedDeploymentsEqual(zap.S(), *atlasDeployment, mergedDeployment)).To(BeTrue()) for _, check := range additionalChecks { - check(atlasCluster) + check(atlasDeployment) } }) } checkAdvancedDeploymentOptions := func(specOptions *mdbv1.ProcessArgs) { By("Checking that Atlas Advanced Options are equal to the Spec Options", func() { - atlasOptions, _, err := atlasClient.Clusters.GetProcessArgs(context.Background(), createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name) + atlasOptions, _, err := atlasClient.Clusters.GetProcessArgs(context.Background(), createdProject.Status.ID, createdDeployment.Spec.DeploymentSpec.Name) Expect(err).ToNot(HaveOccurred()) Expect(specOptions.IsEqual(atlasOptions)).To(BeTrue()) @@ -215,52 +215,52 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { } performUpdate := func(timeout interface{}) { - Expect(k8sClient.Update(context.Background(), createdCluster)).To(Succeed()) + Expect(k8sClient.Update(context.Background(), createdDeployment)).To(Succeed()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterUpdatingFunc()), + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentUpdatingFunc()), timeout, interval).Should(BeTrue()) lastGeneration++ } - Describe("Create cluster & change ReplicationSpecs", func() { + Describe("Create deployment & change ReplicationSpecs", func() { It("Should Succeed", func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) // Atlas will add some defaults in case the Atlas Operator doesn't set them - replicationSpecsCheck := func(cluster *mongodbatlas.Cluster) { - Expect(cluster.ReplicationSpecs).To(HaveLen(1)) - Expect(cluster.ReplicationSpecs[0].ID).NotTo(BeNil()) - Expect(cluster.ReplicationSpecs[0].ZoneName).To(Equal("Zone 1")) - 
Expect(cluster.ReplicationSpecs[0].RegionsConfig).To(HaveLen(1)) - Expect(cluster.ReplicationSpecs[0].RegionsConfig[createdCluster.Spec.DeploymentSpec.ProviderSettings.RegionName]).NotTo(BeNil()) + replicationSpecsCheck := func(deployment *mongodbatlas.Cluster) { + Expect(deployment.ReplicationSpecs).To(HaveLen(1)) + Expect(deployment.ReplicationSpecs[0].ID).NotTo(BeNil()) + Expect(deployment.ReplicationSpecs[0].ZoneName).To(Equal("Zone 1")) + Expect(deployment.ReplicationSpecs[0].RegionsConfig).To(HaveLen(1)) + Expect(deployment.ReplicationSpecs[0].RegionsConfig[createdDeployment.Spec.DeploymentSpec.ProviderSettings.RegionName]).NotTo(BeNil()) } - By(fmt.Sprintf("Creating the Cluster %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), 30*time.Minute, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() - singleNumShard := func(cluster *mongodbatlas.Cluster) { - Expect(cluster.ReplicationSpecs[0].NumShards).To(Equal(int64ptr(1))) + singleNumShard := func(deployment *mongodbatlas.Cluster) { + Expect(deployment.ReplicationSpecs[0].NumShards).To(Equal(int64ptr(1))) } checkAtlasState(replicationSpecsCheck, singleNumShard) }) By("Updating ReplicationSpecs", func() { - createdCluster.Spec.DeploymentSpec.ReplicationSpecs = append(createdCluster.Spec.DeploymentSpec.ReplicationSpecs, mdbv1.ReplicationSpec{ + createdDeployment.Spec.DeploymentSpec.ReplicationSpecs = append(createdDeployment.Spec.DeploymentSpec.ReplicationSpecs, mdbv1.ReplicationSpec{ NumShards: int64ptr(2), }) - createdCluster.Spec.DeploymentSpec.ClusterType = "SHARDED" + createdDeployment.Spec.DeploymentSpec.ClusterType = "SHARDED" performUpdate(40 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() - twoNumShard := func(cluster *mongodbatlas.Cluster) { - Expect(cluster.ReplicationSpecs[0].NumShards).To(Equal(int64ptr(2))) + twoNumShard := func(deployment *mongodbatlas.Cluster) { + Expect(deployment.ReplicationSpecs[0].NumShards).To(Equal(int64ptr(2))) } // ReplicationSpecs has the same defaults but the number of shards has changed checkAtlasState(replicationSpecsCheck, twoNumShard) @@ -268,48 +268,48 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }) }) - Describe("Create cluster & increase DiskSizeGB", func() { + Describe("Create deployment & increase DiskSizeGB", func() { It("Should Succeed", func() { - expectedCluster := mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) + expectedDeployment := mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Cluster %s", kube.ObjectKeyFromObject(expectedCluster)), func() { - createdCluster.ObjectMeta = expectedCluster.ObjectMeta - Expect(k8sClient.Create(context.Background(), expectedCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(expectedDeployment)), func() { + createdDeployment.ObjectMeta = expectedDeployment.ObjectMeta + 
Expect(k8sClient.Create(context.Background(), expectedDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), 1800, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) By("Increasing InstanceSize", func() { - createdCluster.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M30" + createdDeployment.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M30" performUpdate(40 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) }) }) - Describe("Create cluster & change it to GEOSHARDED", Label("int", "geosharded", "slow"), func() { + Describe("Create deployment & change it to GEOSHARDED", Label("int", "geosharded", "slow"), func() { It("Should Succeed", func() { - expectedCluster := mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) + expectedDeployment := mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Cluster %s", kube.ObjectKeyFromObject(expectedCluster)), func() { - createdCluster.ObjectMeta = expectedCluster.ObjectMeta - Expect(k8sClient.Create(context.Background(), expectedCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(expectedDeployment)), func() { + createdDeployment.ObjectMeta = expectedDeployment.ObjectMeta + Expect(k8sClient.Create(context.Background(), expectedDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) - By("Change cluster to GEOSHARDED", func() { - createdCluster.Spec.DeploymentSpec.ClusterType = "GEOSHARDED" - createdCluster.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{ + By("Change deployment to GEOSHARDED", func() { + createdDeployment.Spec.DeploymentSpec.ClusterType = "GEOSHARDED" + createdDeployment.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{ { NumShards: int64ptr(1), ZoneName: "Zone 1", @@ -330,32 +330,32 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }, } performUpdate(80 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) }) }) - Describe("Create/Update the cluster (more complex scenario)", func() { + Describe("Create/Update the deployment (more complex scenario)", func() { It("Should be created", func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) - createdCluster.Spec.DeploymentSpec.ClusterType = mdbv1.TypeReplicaSet - createdCluster.Spec.DeploymentSpec.AutoScaling = &mdbv1.AutoScalingSpec{ + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) + createdDeployment.Spec.DeploymentSpec.ClusterType = mdbv1.TypeReplicaSet + createdDeployment.Spec.DeploymentSpec.AutoScaling = &mdbv1.AutoScalingSpec{ Compute: &mdbv1.ComputeSpec{ Enabled: boolptr(true), ScaleDownEnabled: 
boolptr(true), }, DiskGBEnabled: boolptr(false), } - createdCluster.Spec.DeploymentSpec.ProviderSettings.AutoScaling = &mdbv1.AutoScalingSpec{ + createdDeployment.Spec.DeploymentSpec.ProviderSettings.AutoScaling = &mdbv1.AutoScalingSpec{ Compute: &mdbv1.ComputeSpec{ MaxInstanceSize: "M20", MinInstanceSize: "M10", }, } - createdCluster.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M10" - createdCluster.Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "createdBy", Value: "Atlas Operator"}} - createdCluster.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ + createdDeployment.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M10" + createdDeployment.Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "createdBy", Value: "Atlas Operator"}} + createdDeployment.Spec.DeploymentSpec.ReplicationSpecs = []mdbv1.ReplicationSpec{{ NumShards: int64ptr(1), ZoneName: "Zone 1", // One interesting thing: if the regionsConfig is not empty - Atlas nullifies the 'providerSettings.regionName' field @@ -364,12 +364,12 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { "US_WEST_2": {AnalyticsNodes: int64ptr(0), ElectableNodes: int64ptr(2), Priority: int64ptr(7), ReadOnlyNodes: int64ptr(0)}, }, }} - createdCluster.Spec.DeploymentSpec.DiskSizeGB = intptr(10) + createdDeployment.Spec.DeploymentSpec.DiskSizeGB = intptr(10) replicationSpecsCheckFunc := func(c *mongodbatlas.Cluster) { - cluster, err := createdCluster.Spec.Cluster() + deployment, err := createdDeployment.Spec.Deployment() Expect(err).NotTo(HaveOccurred()) - expectedReplicationSpecs := cluster.ReplicationSpecs + expectedReplicationSpecs := deployment.ReplicationSpecs // The ID field is added by Atlas - we don't have it in our specs Expect(c.ReplicationSpecs[0].ID).NotTo(BeNil()) @@ -378,39 +378,39 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { Expect(c.ReplicationSpecs).To(Equal(expectedReplicationSpecs)) } - By("Creating the Cluster", func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).To(Succeed()) + By("Creating the Deployment", func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).To(Succeed()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(replicationSpecsCheckFunc) }) - By("Updating the cluster (multiple operations)", func() { - delete(createdCluster.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig, "US_WEST_2") - createdCluster.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_WEST_1"] = mdbv1.RegionsConfig{AnalyticsNodes: int64ptr(0), ElectableNodes: int64ptr(2), Priority: int64ptr(6), ReadOnlyNodes: int64ptr(0)} - config := createdCluster.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_EAST_1"] + By("Updating the deployment (multiple operations)", func() { + delete(createdDeployment.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig, "US_WEST_2") + createdDeployment.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_WEST_1"] = mdbv1.RegionsConfig{AnalyticsNodes: int64ptr(0), ElectableNodes: int64ptr(2), Priority: int64ptr(6), ReadOnlyNodes: int64ptr(0)} + config := 
createdDeployment.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_EAST_1"] // Note, that Atlas has strict requirements to priorities - they must start with 7 and be in descending order over the regions config.Priority = int64ptr(7) - createdCluster.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_EAST_1"] = config + createdDeployment.Spec.DeploymentSpec.ReplicationSpecs[0].RegionsConfig["US_EAST_1"] = config - createdCluster.Spec.DeploymentSpec.ProviderSettings.AutoScaling.Compute.MaxInstanceSize = "M30" + createdDeployment.Spec.DeploymentSpec.ProviderSettings.AutoScaling.Compute.MaxInstanceSize = "M30" - performUpdate(ClusterUpdateTimeout) + performUpdate(DeploymentUpdateTimeout) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterUpdatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentUpdatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(replicationSpecsCheckFunc) }) - By("Disable cluster and disk AutoScaling", func() { - createdCluster.Spec.DeploymentSpec.AutoScaling = &mdbv1.AutoScalingSpec{ + By("Disable deployment and disk AutoScaling", func() { + createdDeployment.Spec.DeploymentSpec.AutoScaling = &mdbv1.AutoScalingSpec{ Compute: &mdbv1.ComputeSpec{ Enabled: boolptr(false), ScaleDownEnabled: boolptr(false), @@ -418,18 +418,18 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { DiskGBEnabled: boolptr(false), } - performUpdate(ClusterUpdateTimeout) + performUpdate(DeploymentUpdateTimeout) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterUpdatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentUpdatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - cluster, err := createdCluster.Spec.Cluster() + deployment, err := createdDeployment.Spec.Deployment() Expect(err).NotTo(HaveOccurred()) - Expect(c.AutoScaling.Compute).To(Equal(cluster.AutoScaling.Compute)) + Expect(c.AutoScaling.Compute).To(Equal(deployment.AutoScaling.Compute)) }) }) }) @@ -437,102 +437,102 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { Describe("Create/Update the cluster", func() { It("Should fail, then be fixed (GCP)", func() { - createdCluster = mdbv1.DefaultGCPCluster(namespace.Name, createdProject.Name).WithAtlasName("") + createdDeployment = mdbv1.DefaultGCPDeployment(namespace.Name, createdProject.Name).WithAtlasName("") - By(fmt.Sprintf("Creating the Cluster %s with invalid parameters", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment %s with invalid parameters", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) Eventually( testutil.WaitFor( k8sClient, - createdCluster, + createdDeployment, status. - FalseCondition(status.ClusterReadyType). + FalseCondition(status.DeploymentReadyType). 
WithReason(string(workflow.Internal)). // Internal due to reconciliation failing on the initial GET request WithMessageRegexp("name is invalid because must be set"), ), 60, interval, ).Should(BeTrue()) - testutil.EventExists(k8sClient, createdCluster, "Warning", string(workflow.Internal), "name is invalid because must be set") + testutil.EventExists(k8sClient, createdDeployment, "Warning", string(workflow.Internal), "name is invalid because must be set") lastGeneration++ }) - By("Fixing the cluster", func() { - createdCluster.Spec.DeploymentSpec.Name = "fixed-cluster" + By("Fixing the deployment", func() { + createdDeployment.Spec.DeploymentSpec.Name = "fixed-deployment" - Expect(k8sClient.Update(context.Background(), createdCluster)).To(Succeed()) + Expect(k8sClient.Update(context.Background(), createdDeployment)).To(Succeed()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType)), + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType)), 20*time.Minute, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) }) It("Should Succeed (AWS)", func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Cluster %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) - By("Updating the Cluster labels", func() { - createdCluster.Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "int-test", Value: "true"}} + By("Updating the Deployment labels", func() { + createdDeployment.Spec.DeploymentSpec.Labels = []common.LabelSpec{{Key: "int-test", Value: "true"}} performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) - By("Updating the Cluster backups settings", func() { - createdCluster.Spec.DeploymentSpec.ProviderBackupEnabled = boolptr(true) + By("Updating the Deployment backups settings", func() { + createdDeployment.Spec.DeploymentSpec.ProviderBackupEnabled = boolptr(true) performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - Expect(c.ProviderBackupEnabled).To(Equal(createdCluster.Spec.DeploymentSpec.ProviderBackupEnabled)) + Expect(c.ProviderBackupEnabled).To(Equal(createdDeployment.Spec.DeploymentSpec.ProviderBackupEnabled)) }) }) - By("Decreasing the Cluster disk size", func() { - createdCluster.Spec.DeploymentSpec.DiskSizeGB = intptr(10) + By("Decreasing the Deployment disk size", func() { + createdDeployment.Spec.DeploymentSpec.DiskSizeGB = intptr(10) performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + 
doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - Expect(*c.DiskSizeGB).To(BeEquivalentTo(*createdCluster.Spec.DeploymentSpec.DiskSizeGB)) + Expect(*c.DiskSizeGB).To(BeEquivalentTo(*createdDeployment.Spec.DeploymentSpec.DiskSizeGB)) // check whether https://github.com/mongodb/go-client-mongodb-atlas/issues/140 is fixed Expect(c.DiskSizeGB).To(BeAssignableToTypeOf(float64ptr(0)), "DiskSizeGB is no longer a *float64, please check the spec!") }) }) - By("Pausing the cluster", func() { - createdCluster.Spec.DeploymentSpec.Paused = boolptr(true) + By("Pausing the deployment", func() { + createdDeployment.Spec.DeploymentSpec.Paused = boolptr(true) performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - Expect(c.Paused).To(Equal(createdCluster.Spec.DeploymentSpec.Paused)) + Expect(c.Paused).To(Equal(createdDeployment.Spec.DeploymentSpec.Paused)) }) }) - By("Updating the Cluster configuration while paused (should fail)", func() { - createdCluster.Spec.DeploymentSpec.ProviderBackupEnabled = boolptr(false) + By("Updating the Deployment configuration while paused (should fail)", func() { + createdDeployment.Spec.DeploymentSpec.ProviderBackupEnabled = boolptr(false) - Expect(k8sClient.Update(context.Background(), createdCluster)).To(Succeed()) + Expect(k8sClient.Update(context.Background(), createdDeployment)).To(Succeed()) Eventually( testutil.WaitFor( k8sClient, - createdCluster, + createdDeployment, status. - FalseCondition(status.ClusterReadyType). - WithReason(string(workflow.ClusterNotUpdatedInAtlas)). + FalseCondition(status.DeploymentReadyType). + WithReason(string(workflow.DeploymentNotUpdatedInAtlas)). WithMessageRegexp("CANNOT_UPDATE_PAUSED_CLUSTER"), ), 60, @@ -542,34 +542,34 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { lastGeneration++ }) - By("Unpausing the cluster", func() { - createdCluster.Spec.DeploymentSpec.Paused = boolptr(false) + By("Unpausing the deployment", func() { + createdDeployment.Spec.DeploymentSpec.Paused = boolptr(false) performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - Expect(c.Paused).To(Equal(createdCluster.Spec.DeploymentSpec.Paused)) + Expect(c.Paused).To(Equal(createdDeployment.Spec.DeploymentSpec.Paused)) }) }) By("Checking that modifications were applied after unpausing", func() { - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState(func(c *mongodbatlas.Cluster) { - Expect(c.ProviderBackupEnabled).To(Equal(createdCluster.Spec.DeploymentSpec.ProviderBackupEnabled)) + Expect(c.ProviderBackupEnabled).To(Equal(createdDeployment.Spec.DeploymentSpec.ProviderBackupEnabled)) }) }) By("Setting incorrect instance size (should fail)", func() { - oldSizeName := createdCluster.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName - createdCluster.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M42" + oldSizeName := createdDeployment.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName + createdDeployment.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = "M42" - Expect(k8sClient.Update(context.Background(), createdCluster)).To(Succeed()) + Expect(k8sClient.Update(context.Background(), createdDeployment)).To(Succeed()) Eventually( testutil.WaitFor( k8sClient, - createdCluster, + createdDeployment, status. - FalseCondition(status.ClusterReadyType). 
- WithReason(string(workflow.ClusterNotUpdatedInAtlas)). + FalseCondition(status.DeploymentReadyType). + WithReason(string(workflow.DeploymentNotUpdatedInAtlas)). WithMessageRegexp("INVALID_ENUM_VALUE"), ), 60, @@ -578,17 +578,17 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { lastGeneration++ - By("Fixing the Cluster", func() { - createdCluster.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = oldSizeName + By("Fixing the Deployment", func() { + createdDeployment.Spec.DeploymentSpec.ProviderSettings.InstanceSizeName = oldSizeName performUpdate(20 * time.Minute) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) }) }) }) - Describe("Create DBUser before cluster & check secrets", func() { + Describe("Create DBUser before deployment & check secrets", func() { It("Should Succeed", func() { By(fmt.Sprintf("Creating password Secret %s", UserPasswordSecret), func() { passwordSecret := buildPasswordSecret(UserPasswordSecret, DBUserPassword) @@ -606,7 +606,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { createdDBUserFakeScope := mdbv1.DefaultDBUser(namespace.Name, "test-db-user-fake-scope", createdProject.Name). WithPasswordSecret(UserPasswordSecret). - WithScope(mdbv1.ClusterScopeType, "fake-cluster") + WithScope(mdbv1.DeploymentScopeType, "fake-deployment") By(fmt.Sprintf("Creating the Database User %s", kube.ObjectKeyFromObject(createdDBUserFakeScope)), func() { Expect(k8sClient.Create(context.Background(), createdDBUserFakeScope)).ToNot(HaveOccurred()) @@ -615,35 +615,35 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 0) - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Cluster %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) By("Checking connection Secrets", func() { - Expect(tryConnect(createdProject.ID(), *createdCluster, *createdDBUser)).To(Succeed()) + Expect(tryConnect(createdProject.ID(), *createdDeployment, *createdDBUser)).To(Succeed()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 1) - validateSecret(k8sClient, *createdProject, *createdCluster, *createdDBUser) + validateSecret(k8sClient, *createdProject, *createdDeployment, *createdDBUser) }) }) }) - Describe("Create cluster, user, delete cluster and check secrets are removed", func() { + Describe("Create deployment, user, delete deployment and check secrets are removed", func() { It("Should Succeed", func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Cluster %s", 
kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) + By(fmt.Sprintf("Creating the Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), - ClusterUpdateTimeout, interval).Should(BeTrue()) + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), + DeploymentUpdateTimeout, interval).Should(BeTrue()) - doRegularClusterStatusChecks() + doRegularDeploymentStatusChecks() checkAtlasState() }) @@ -657,58 +657,58 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { DBUserUpdateTimeout, interval).Should(BeTrue()) }) - By("Removing Atlas Cluster "+createdCluster.Name, func() { - Expect(k8sClient.Delete(context.Background(), createdCluster)).To(Succeed()) - Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) + By("Removing Atlas Deployment "+createdDeployment.Name, func() { + Expect(k8sClient.Delete(context.Background(), createdDeployment)).To(Succeed()) + Eventually(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdDeployment.Spec.DeploymentSpec.Name), 600, interval).Should(BeTrue()) }) By("Checking that Secrets got removed", func() { - secretNames := []string{kube.NormalizeIdentifier(fmt.Sprintf("%s-%s-%s", createdProject.Spec.Name, createdCluster.Spec.DeploymentSpec.Name, createdDBUser.Spec.Username))} - createdCluster = nil // prevent cleanup from failing due to cluster already deleted + secretNames := []string{kube.NormalizeIdentifier(fmt.Sprintf("%s-%s-%s", createdProject.Spec.Name, createdDeployment.Spec.DeploymentSpec.Name, createdDBUser.Spec.Username))} + createdDeployment = nil // prevent cleanup from failing due to deployment already deleted Eventually(checkSecretsDontExist(namespace.Name, secretNames), 50, interval).Should(BeTrue()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 0) }) }) }) - Describe("Deleting the cluster (not cleaning Atlas)", func() { + Describe("Deleting the deployment (not cleaning Atlas)", func() { It("Should Succeed", func() { - By(`Creating the cluster with retention policy "keep" first`, func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - createdCluster.ObjectMeta.Annotations = map[string]string{customresource.ResourcePolicyAnnotation: customresource.ResourcePolicyKeep} - manualDeletion = true // We need to remove the cluster in Atlas manually to let project get removed - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(`Creating the deployment with retention policy "keep" first`, func() { + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + createdDeployment.ObjectMeta.Annotations = map[string]string{customresource.ResourcePolicyAnnotation: customresource.ResourcePolicyKeep} + manualDeletion = true // We need to remove the deployment in Atlas manually to let project get removed + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := 
testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) }) - By("Deleting the cluster - stays in Atlas", func() { - Expect(k8sClient.Delete(context.Background(), createdCluster)).To(Succeed()) + By("Deleting the deployment - stays in Atlas", func() { + Expect(k8sClient.Delete(context.Background(), createdDeployment)).To(Succeed()) time.Sleep(5 * time.Minute) - Expect(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdCluster.Spec.DeploymentSpec.Name)()).Should(BeFalse()) + Expect(checkAtlasDeploymentRemoved(createdProject.Status.ID, createdDeployment.Spec.DeploymentSpec.Name)()).Should(BeFalse()) checkNumberOfConnectionSecrets(k8sClient, *createdProject, 0) }) }) }) - Describe("Setting the cluster skip annotation should skip reconciliations.", func() { + Describe("Setting the deployment skip annotation should skip reconciliations.", func() { It("Should Succeed", func() { - By(`Creating the cluster with reconciliation policy "skip" first`, func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).Lightweight() - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(`Creating the deployment with reconciliation policy "skip" first`, func() { + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).Lightweight() + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) - createdCluster.ObjectMeta.Annotations = map[string]string{customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip} - createdCluster.Spec.DeploymentSpec.Labels = append(createdCluster.Spec.DeploymentSpec.Labels, common.LabelSpec{ + createdDeployment.ObjectMeta.Annotations = map[string]string{customresource.ReconciliationPolicyAnnotation: customresource.ReconciliationPolicySkip} + createdDeployment.Spec.DeploymentSpec.Labels = append(createdDeployment.Spec.DeploymentSpec.Labels, common.LabelSpec{ Key: "some-key", Value: "some-value", }) @@ -725,22 +725,22 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { return false } - Expect(k8sClient.Update(context.Background(), createdCluster)).ToNot(HaveOccurred()) - Eventually(testutil.WaitForAtlasDeploymentStateToNotBeReached(ctx, atlasClient, createdProject.Name, createdCluster.GetClusterName(), containsLabel)) + Expect(k8sClient.Update(context.Background(), createdDeployment)).ToNot(HaveOccurred()) + Eventually(testutil.WaitForAtlasDeploymentStateToNotBeReached(ctx, atlasClient, createdProject.Name, createdDeployment.GetDeploymentName(), containsLabel)) }) }) }) - Describe("Create advanced cluster", func() { + Describe("Create advanced deployment", func() { It("Should Succeed", func() { - createdCluster = mdbv1.DefaultAwsAdvancedDeployment(namespace.Name, 
createdProject.Name) + createdDeployment = mdbv1.DefaultAwsAdvancedDeployment(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Advanced Cluster %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Advanced Deployment %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) @@ -750,52 +750,52 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }) }) - Describe("Set advanced cluster options", func() { + Describe("Set advanced deployment options", func() { It("Should Succeed", func() { - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name) - createdCluster.Spec.ProcessArgs = &mdbv1.ProcessArgs{ + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name) + createdDeployment.Spec.ProcessArgs = &mdbv1.ProcessArgs{ JavascriptEnabled: boolptr(true), DefaultReadConcern: "available", } - By(fmt.Sprintf("Creating the Cluster with Advanced Options %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Deployment with Advanced Options %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) - Eventually(testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFunc()), + Eventually(testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFunc()), 30*time.Minute, interval).Should(BeTrue()) - doRegularClusterStatusChecks() - checkAdvancedDeploymentOptions(createdCluster.Spec.ProcessArgs) + doRegularDeploymentStatusChecks() + checkAdvancedDeploymentOptions(createdDeployment.Spec.ProcessArgs) }) - By("Updating Advanced Cluster Options", func() { - createdCluster.Spec.ProcessArgs.JavascriptEnabled = boolptr(false) + By("Updating Advanced Deployment Options", func() { + createdDeployment.Spec.ProcessArgs.JavascriptEnabled = boolptr(false) performUpdate(40 * time.Minute) - doRegularClusterStatusChecks() - checkAdvancedDeploymentOptions(createdCluster.Spec.ProcessArgs) + doRegularDeploymentStatusChecks() + checkAdvancedDeploymentOptions(createdDeployment.Spec.ProcessArgs) }) }) }) Describe("Create serverless instance", func() { It("Should Succeed", func() { - createdCluster = mdbv1.NewDefaultAWSServerlessInstance(namespace.Name, createdProject.Name) + createdDeployment = mdbv1.NewDefaultAWSServerlessInstance(namespace.Name, createdProject.Name) - By(fmt.Sprintf("Creating the Serverless Instance %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).ToNot(HaveOccurred()) + By(fmt.Sprintf("Creating the Serverless Instance %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + 
Expect(k8sClient.Create(context.Background(), createdDeployment)).ToNot(HaveOccurred()) Eventually( func(g Gomega) { - success := testutil.WaitFor(k8sClient, createdCluster, status.TrueCondition(status.ReadyType), validateClusterCreatingFuncGContext(g))() + success := testutil.WaitFor(k8sClient, createdDeployment, status.TrueCondition(status.ReadyType), validateDeploymentCreatingFuncGContext(g))() g.Expect(success).To(BeTrue()) }).WithTimeout(30 * time.Minute).WithPolling(interval).Should(Succeed()) - doServerlessClusterStatusChecks() + doServerlessDeploymentStatusChecks() }) }) }) - Describe("Create default cluster with backups enabled", func() { + Describe("Create default deployment with backups enabled", func() { It("Should succeed", func() { backupPolicyDefault := &mdbv1.AtlasBackupPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -837,30 +837,30 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { Expect(k8sClient.Create(context.Background(), backupPolicyDefault)).NotTo(HaveOccurred()) Expect(k8sClient.Create(context.Background(), backupScheduleDefault)).NotTo(HaveOccurred()) - createdCluster = mdbv1.DefaultAWSCluster(namespace.Name, createdProject.Name).WithBackupScheduleRef(common.ResourceRefNamespaced{ + createdDeployment = mdbv1.DefaultAWSDeployment(namespace.Name, createdProject.Name).WithBackupScheduleRef(common.ResourceRefNamespaced{ Name: backupScheduleDefault.Name, Namespace: backupScheduleDefault.Namespace, }) - By(fmt.Sprintf("Creating cluster with backups enabled: %s", kube.ObjectKeyFromObject(createdCluster)), func() { - Expect(k8sClient.Create(context.Background(), createdCluster)).NotTo(HaveOccurred()) + By(fmt.Sprintf("Creating deployment with backups enabled: %s", kube.ObjectKeyFromObject(createdDeployment)), func() { + Expect(k8sClient.Create(context.Background(), createdDeployment)).NotTo(HaveOccurred()) // Do not use Gomega function here like func(g Gomega) as it seems to hang when tests run in parallel Eventually( func() error { - cluster, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.ID(), createdCluster.Spec.DeploymentSpec.Name) + deployment, _, err := atlasClient.Clusters.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.DeploymentSpec.Name) if err != nil { return err } - if cluster.StateName != "IDLE" { - return errors.New("cluster is not IDLE yet") + if deployment.StateName != "IDLE" { + return errors.New("deployment is not IDLE yet") } time.Sleep(10 * time.Second) return nil }).WithTimeout(30 * time.Minute).WithPolling(5 * time.Second).Should(Not(HaveOccurred())) Eventually(func() error { - actualPolicy, _, err := atlasClient.CloudProviderSnapshotBackupPolicies.Get(context.Background(), createdProject.ID(), createdCluster.Spec.DeploymentSpec.Name) + actualPolicy, _, err := atlasClient.CloudProviderSnapshotBackupPolicies.Get(context.Background(), createdProject.ID(), createdDeployment.Spec.DeploymentSpec.Name) if err != nil { return err } @@ -889,7 +889,7 @@ var _ = Describe("AtlasDeployment", Label("int", "AtlasDeployment"), func() { }) }) -func validateClusterCreatingFunc() func(a mdbv1.AtlasCustomResource) { +func validateDeploymentCreatingFunc() func(a mdbv1.AtlasCustomResource) { startedCreation := false return func(a mdbv1.AtlasCustomResource) { c := a.(*mdbv1.AtlasDeployment) @@ -900,20 +900,20 @@ func validateClusterCreatingFunc() func(a mdbv1.AtlasCustomResource) { if startedCreation { Expect(c.Status.StateName).To(Or(Equal("CREATING"), Equal("IDLE")), fmt.Sprintf("Current conditions: 
%+v", c.Status.Conditions)) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.ClusterReadyType).WithReason(string(workflow.ClusterCreating)).WithMessageRegexp("cluster is provisioning"), + status.FalseCondition(status.DeploymentReadyType).WithReason(string(workflow.DeploymentCreating)).WithMessageRegexp("deployment is provisioning"), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) Expect(c.Status.Conditions).To(ConsistOf(expectedConditionsMatchers)) } else { // Otherwise there could have been some exception in Atlas on creation - let's check the conditions - condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.ClusterReadyType) + condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.DeploymentReadyType) Expect(ok).To(BeFalse(), fmt.Sprintf("Unexpected condition: %v", condition)) } } } -func validateClusterCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResource) { +func validateDeploymentCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResource) { startedCreation := false return func(a mdbv1.AtlasCustomResource) { c := a.(*mdbv1.AtlasDeployment) @@ -924,20 +924,20 @@ func validateClusterCreatingFuncGContext(g Gomega) func(a mdbv1.AtlasCustomResou if startedCreation { g.Expect(c.Status.StateName).To(Or(Equal("CREATING"), Equal("IDLE")), fmt.Sprintf("Current conditions: %+v", c.Status.Conditions)) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.ClusterReadyType).WithReason(string(workflow.ClusterCreating)).WithMessageRegexp("cluster is provisioning"), + status.FalseCondition(status.DeploymentReadyType).WithReason(string(workflow.DeploymentCreating)).WithMessageRegexp("deployment is provisioning"), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) g.Expect(c.Status.Conditions).To(ConsistOf(expectedConditionsMatchers)) } else { // Otherwise there could have been some exception in Atlas on creation - let's check the conditions - condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.ClusterReadyType) + condition, ok := testutil.FindConditionByType(c.Status.Conditions, status.DeploymentReadyType) g.Expect(ok).To(BeFalse(), fmt.Sprintf("Unexpected condition: %v", condition)) } } } -func validateClusterUpdatingFunc() func(a mdbv1.AtlasCustomResource) { +func validateDeploymentUpdatingFunc() func(a mdbv1.AtlasCustomResource) { isIdle := true return func(a mdbv1.AtlasCustomResource) { c := a.(*mdbv1.AtlasDeployment) @@ -949,7 +949,7 @@ func validateClusterUpdatingFunc() func(a mdbv1.AtlasCustomResource) { if !isIdle { Expect(c.Status.StateName).To(Or(Equal("UPDATING"), Equal("REPAIRING")), fmt.Sprintf("Current conditions: %+v", c.Status.Conditions)) expectedConditionsMatchers := testutil.MatchConditions( - status.FalseCondition(status.ClusterReadyType).WithReason(string(workflow.ClusterUpdating)).WithMessageRegexp("cluster is updating"), + status.FalseCondition(status.DeploymentReadyType).WithReason(string(workflow.DeploymentUpdating)).WithMessageRegexp("deployment is updating"), status.FalseCondition(status.ReadyType), status.TrueCondition(status.ValidationSucceeded), ) @@ -958,12 +958,12 @@ func validateClusterUpdatingFunc() func(a mdbv1.AtlasCustomResource) { } } -// checkAtlasDeploymentRemoved returns true if the Atlas Cluster is removed from Atlas. 
Note the behavior: the cluster +// checkAtlasDeploymentRemoved returns true if the Atlas Deployment is removed from Atlas. Note the behavior: the deployment // is removed from Atlas as soon as the DELETE API call has been made. This is different from the case when the -// cluster is terminated from UI (in this case GET request succeeds while the cluster is being terminated) -func checkAtlasDeploymentRemoved(projectID string, clusterName string) func() bool { +// deployment is terminated from the UI (in that case the GET request succeeds while the deployment is being terminated) +func checkAtlasDeploymentRemoved(projectID string, deploymentName string) func() bool { return func() bool { - _, r, err := atlasClient.Clusters.Get(context.Background(), projectID, clusterName) + _, r, err := atlasClient.Clusters.Get(context.Background(), projectID, deploymentName) if err != nil { if r != nil && r.StatusCode == http.StatusNotFound { return true diff --git a/test/int/integration_suite_test.go b/test/int/integration_suite_test.go index 74ff769212..ee6ff6fb90 100644 --- a/test/int/integration_suite_test.go +++ b/test/int/integration_suite_test.go @@ -117,10 +117,10 @@ var _ = SynchronizedBeforeSuite(func() []byte { }, func(data []byte) { if os.Getenv("USE_EXISTING_CLUSTER") != "" { var err error - // For the existing cluster we read the kubeconfig + // For the existing Kubernetes cluster we read the kubeconfig cfg, err = config.GetConfig() if err != nil { - panic("Failed to read the config for existing cluster") + panic("Failed to read the config for existing Kubernetes cluster") } } else { d := gob.NewDecoder(bytes.NewReader(data)) @@ -194,7 +194,7 @@ func prepareControllers() { ctrl.SetLogger(zapr.NewLogger(logger)) // Note on the syncPeriod - decreasing this to a smaller time allows to test its work for the long-running tests - // (clusters, database users). The prod value is much higher + // (deployments, database users). The prod value is much higher syncPeriod := time.Minute * 30 k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: scheme.Scheme,
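
To make the syncPeriod note in `prepareControllers` concrete, here is a minimal sketch (not part of this change) of how the reduced sync period is typically passed to the controller manager. It assumes a controller-runtime version whose manager `Options` still exposes `SyncPeriod` directly, as was the case when this change was made; the helper name and package name are illustrative only.

```
package inttest

import (
	"time"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

// newTestManager is a hypothetical helper showing how the reduced 30-minute
// sync period can be wired into the manager so that the long-running
// deployment and database user tests exercise periodic reconciliation.
func newTestManager(cfg *rest.Config) (ctrl.Manager, error) {
	syncPeriod := 30 * time.Minute // the production value is much higher
	return ctrl.NewManager(cfg, ctrl.Options{
		Scheme:     scheme.Scheme,
		SyncPeriod: &syncPeriod,
	})
}
```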
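
Relatedly, `checkAtlasDeploymentRemoved` above only recognises deployments deleted through the API, where the GET returns 404 immediately. Below is a hedged sketch of a variant that also treats a deployment terminated from the Atlas UI as going away; the `"DELETING"`/`"DELETED"` state names and the function name are assumptions based on the reconciler's TODO, not part of this change.

```
package inttest

import (
	"context"
	"net/http"

	"go.mongodb.org/atlas/mongodbatlas"
)

// checkAtlasDeploymentGone is a hypothetical variant of checkAtlasDeploymentRemoved:
// it returns true once the deployment is gone from Atlas or is visibly terminating.
func checkAtlasDeploymentGone(client *mongodbatlas.Client, projectID, deploymentName string) func() bool {
	return func() bool {
		deployment, r, err := client.Clusters.Get(context.Background(), projectID, deploymentName)
		if err != nil {
			// Deleted via the API: the GET starts returning 404 straight away.
			return r != nil && r.StatusCode == http.StatusNotFound
		}
		// Terminated from the UI: the GET still succeeds while Atlas tears the
		// deployment down, so fall back to the reported state name (assumed values).
		return deployment.StateName == "DELETING" || deployment.StateName == "DELETED"
	}
}
```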