diff --git a/go.mod b/go.mod index ceca47320..d730c62e2 100644 --- a/go.mod +++ b/go.mod @@ -64,6 +64,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect + github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.4.0 github.com/stretchr/testify v1.8.3 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect diff --git a/go.sum b/go.sum index 21a16b1de..04962e263 100644 --- a/go.sum +++ b/go.sum @@ -135,6 +135,8 @@ github.com/stackitcloud/stackit-sdk-go/services/logme v0.4.0 h1:ZySWV2ZpSWY0qlV1 github.com/stackitcloud/stackit-sdk-go/services/logme v0.4.0/go.mod h1:CG5G3FlJVksfnhcxb8nXYAFxn4lnUBWkDMGNW+6jq2w= github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.4.0 h1:0U8sJFEeYcp9sC9ybqOo0ZM4Rco56wFk+xD4+BltthM= github.com/stackitcloud/stackit-sdk-go/services/mariadb v0.4.0/go.mod h1:3dOLY0zIH/o2iP/DnkM2bnQIW/Dm79XNDw3gBY5se9s= +github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.4.0 h1:KCQeMLLoMW51w/FP7Wa7lNw9L9IFD68plXrPLWzKZEk= +github.com/stackitcloud/stackit-sdk-go/services/mongodbflex v0.4.0/go.mod h1:EKKEf0dl4uBye5iH49delEokDnXXqFsWCWRFyYW/7YM= github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.4.0 h1:cU0qGWg2mW4mRM5xeTaB5DSSFnzetyIx2J+ER3RmdIg= github.com/stackitcloud/stackit-sdk-go/services/objectstorage v0.4.0/go.mod h1:H0B0VBzyW90ksuG+Bu9iqOan80paw+J6Ik9AZuiz9M0= github.com/stackitcloud/stackit-sdk-go/services/opensearch v0.4.0 h1:6Q+ArGyOQDir05pJLc0TbJBGrYyNZ2tKzNY8AGVA0SA= diff --git a/stackit/internal/core/core.go b/stackit/internal/core/core.go index 89d4c8cbc..af9e26d12 100644 --- a/stackit/internal/core/core.go +++ b/stackit/internal/core/core.go @@ -17,19 +17,20 @@ type ProviderData struct { RoundTripper http.RoundTripper ServiceAccountEmail string Region string + ArgusCustomEndpoint string DnsCustomEndpoint string - PostgreSQLCustomEndpoint string - PostgresFlexCustomEndpoint string LogMeCustomEndpoint string - RabbitMQCustomEndpoint string MariaDBCustomEndpoint string + MongoDBFlexCustomEndpoint string ObjectStorageCustomEndpoint string OpenSearchCustomEndpoint string + PostgresFlexCustomEndpoint string + PostgreSQLCustomEndpoint string + RabbitMQCustomEndpoint string RedisCustomEndpoint string + ResourceManagerCustomEndpoint string SecretsManagerCustomEndpoint string - ArgusCustomEndpoint string SKECustomEndpoint string - ResourceManagerCustomEndpoint string } // DiagsToError Converts TF diagnostics' errors into an error with a human-readable description. 
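The core.go hunk above only adds the MongoDBFlexCustomEndpoint field to ProviderData; the provider.go change that reads the matching provider attribute and populates this field is not part of this excerpt. A minimal sketch of that wiring, under the assumption of a mongodbflex_custom_endpoint provider attribute (the providerModel type and the helper name below are illustrative, not code from this PR):

package stackit

import (
	"github.com/hashicorp/terraform-plugin-framework/types"

	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
)

// providerModel mirrors the assumed provider schema attribute
// "mongodbflex_custom_endpoint"; the real provider.go hunk is not shown in this diff.
type providerModel struct {
	MongoDBFlexCustomEndpoint types.String `tfsdk:"mongodbflex_custom_endpoint"`
}

// applyMongoDBFlexEndpoint copies the optional custom endpoint into core.ProviderData.
// The Configure implementations in the new resource and data source read this field
// to decide between config.WithEndpoint (custom endpoint set) and config.WithRegion (default).
func applyMongoDBFlexEndpoint(m providerModel, data *core.ProviderData) {
	if !(m.MongoDBFlexCustomEndpoint.IsNull() || m.MongoDBFlexCustomEndpoint.IsUnknown()) {
		data.MongoDBFlexCustomEndpoint = m.MongoDBFlexCustomEndpoint.ValueString()
	}
}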
diff --git a/stackit/internal/services/mongodbflex/instance/datasource.go b/stackit/internal/services/mongodbflex/instance/datasource.go
new file mode 100644
index 000000000..d03e304a0
--- /dev/null
+++ b/stackit/internal/services/mongodbflex/instance/datasource.go
@@ -0,0 +1,224 @@
+package mongodbflex
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core"
+	"github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/stackitcloud/stackit-sdk-go/core/config"
+	"github.com/stackitcloud/stackit-sdk-go/services/mongodbflex"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var (
+	_ datasource.DataSource = &instanceDataSource{}
+)
+
+// NewInstanceDataSource is a helper function to simplify the provider implementation.
+func NewInstanceDataSource() datasource.DataSource {
+	return &instanceDataSource{}
+}
+
+// instanceDataSource is the data source implementation.
+type instanceDataSource struct {
+	client *mongodbflex.APIClient
+}
+
+// Metadata returns the data source type name.
+func (r *instanceDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_mongodbflex_instance"
+}
+
+// Configure adds the provider configured client to the data source.
+func (r *instanceDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	providerData, ok := req.ProviderData.(core.ProviderData)
+	if !ok {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData))
+		return
+	}
+
+	var apiClient *mongodbflex.APIClient
+	var err error
+	if providerData.MongoDBFlexCustomEndpoint != "" {
+		apiClient, err = mongodbflex.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+			config.WithEndpoint(providerData.MongoDBFlexCustomEndpoint),
+		)
+	} else {
+		apiClient, err = mongodbflex.NewAPIClient(
+			config.WithCustomAuth(providerData.RoundTripper),
+			config.WithRegion(providerData.Region),
+		)
+	}
+
+	if err != nil {
+		core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err))
+		return
+	}
+
+	r.client = apiClient
+	tflog.Info(ctx, "MongoDB Flex instance client configured")
+}
+
+// Schema defines the schema for the data source.
+func (r *instanceDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	descriptions := map[string]string{
+		"main": "MongoDB Flex instance data source schema.",
+		"id":   "Terraform's internal data source ID. 
It is structured as \"`project_id`,`instance_id`\".", + "instance_id": "ID of the MongoDB Flex instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "acl": "The Access Control List (ACL) for the MongoDB Flex instance.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Computed: true, + }, + "acl": schema.ListAttribute{ + Description: descriptions["acl"], + ElementType: types.StringType, + Computed: true, + }, + "backup_schedule": schema.StringAttribute{ + Computed: true, + }, + "flavor": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "replicas": schema.Int64Attribute{ + Computed: true, + }, + "storage": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "version": schema.StringAttribute{ + Computed: true, + }, + "options": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.Config.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Calling API: %v", err)) + return + } + + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var options = &optionsModel{} + if !(model.Options.IsNull() || model.Options.IsUnknown()) { + diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + + err = mapFields(instanceResp, &model, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "MongoDB Flex instance read") +} diff --git a/stackit/internal/services/mongodbflex/instance/resource.go b/stackit/internal/services/mongodbflex/instance/resource.go new file mode 100644 index 000000000..09336b8de --- /dev/null +++ b/stackit/internal/services/mongodbflex/instance/resource.go @@ -0,0 +1,783 @@ +package mongodbflex + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/conversion" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/validate" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mongodbflex" + "github.com/stackitcloud/stackit-sdk-go/services/mongodbflex/wait" +) + +const ( + DefaultBackupSchedule = "0 0/6 * * *" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &instanceResource{} + _ resource.ResourceWithConfigure = &instanceResource{} + _ resource.ResourceWithImportState = &instanceResource{} +) + +type Model struct { + Id types.String `tfsdk:"id"` // needed by TF + InstanceId types.String `tfsdk:"instance_id"` + ProjectId types.String `tfsdk:"project_id"` + Name types.String `tfsdk:"name"` + ACL types.List `tfsdk:"acl"` + BackupSchedule types.String `tfsdk:"backup_schedule"` + Flavor types.Object `tfsdk:"flavor"` + Replicas types.Int64 `tfsdk:"replicas"` + Storage types.Object `tfsdk:"storage"` + Version types.String `tfsdk:"version"` + Options types.Object `tfsdk:"options"` +} + +// Struct corresponding to Model.Flavor +type flavorModel struct { + Id types.String `tfsdk:"id"` + Description types.String `tfsdk:"description"` + CPU types.Int64 `tfsdk:"cpu"` + RAM types.Int64 `tfsdk:"ram"` +} + +// Types corresponding to flavorModel +var flavorTypes = map[string]attr.Type{ + "id": basetypes.StringType{}, + "description": basetypes.StringType{}, + "cpu": basetypes.Int64Type{}, + "ram": basetypes.Int64Type{}, +} + +// Struct corresponding to Model.Storage +type storageModel struct { + Class types.String `tfsdk:"class"` + Size types.Int64 `tfsdk:"size"` +} + +// Types corresponding to storageModel +var storageTypes = map[string]attr.Type{ + "class": basetypes.StringType{}, + "size": basetypes.Int64Type{}, +} + +// Struct corresponding to Model.Options +type optionsModel struct { + Type types.String `tfsdk:"type"` +} + +// Types corresponding to optionsModel +var optionsTypes = map[string]attr.Type{ + "type": basetypes.StringType{}, +} + +// NewInstanceResource is a helper function to simplify the provider implementation. +func NewInstanceResource() resource.Resource { + return &instanceResource{} +} + +// instanceResource is the resource implementation. +type instanceResource struct { + client *mongodbflex.APIClient +} + +// Metadata returns the resource type name. +func (r *instanceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mongodbflex_instance" +} + +// Configure adds the provider configured client to the resource. +func (r *instanceResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + providerData, ok := req.ProviderData.(core.ProviderData) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Expected configure type stackit.ProviderData, got %T", req.ProviderData)) + return + } + + var apiClient *mongodbflex.APIClient + var err error + if providerData.MongoDBFlexCustomEndpoint != "" { + apiClient, err = mongodbflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithEndpoint(providerData.MongoDBFlexCustomEndpoint), + ) + } else { + apiClient, err = mongodbflex.NewAPIClient( + config.WithCustomAuth(providerData.RoundTripper), + config.WithRegion(providerData.Region), + ) + } + + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error configuring API client", fmt.Sprintf("Configuring client: %v", err)) + return + } + + r.client = apiClient + tflog.Info(ctx, "MongoDB Flex instance client configured") +} + +// Schema defines the schema for the resource. 
+func (r *instanceResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + descriptions := map[string]string{ + "main": "MongoDB Flex instance resource schema.", + "id": "Terraform's internal resource ID. It is structured as \"`project_id`,`instance_id`\".", + "instance_id": "ID of the MongoDB Flex instance.", + "project_id": "STACKIT project ID to which the instance is associated.", + "name": "Instance name.", + "acl": "The Access Control List (ACL) for the MongoDB Flex instance.", + "backup_schedule": `The backup schedule. Should follow the cron scheduling system format (e.g. "0 0 * * *").`, + "options": "Custom parameters for the MongoDB Flex instance.", + } + + resp.Schema = schema.Schema{ + Description: descriptions["main"], + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: descriptions["id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "instance_id": schema.StringAttribute{ + Description: descriptions["instance_id"], + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "project_id": schema.StringAttribute{ + Description: descriptions["project_id"], + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.String{ + validate.UUID(), + validate.NoSeparator(), + }, + }, + "name": schema.StringAttribute{ + Description: descriptions["name"], + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.RegexMatches( + regexp.MustCompile("^[a-z]([-a-z0-9]*[a-z0-9])?$"), + "must start with a letter, must have lower case letters, numbers or hyphens, and no hyphen at the end", + ), + }, + }, + "acl": schema.ListAttribute{ + Description: descriptions["acl"], + ElementType: types.StringType, + Required: true, + }, + "backup_schedule": schema.StringAttribute{ + Computed: true, // Update functionality for this field is currently not working properly on the API side + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Default: stringdefault.StaticString(DefaultBackupSchedule), // Using the same default value as the Portal, as the field is required + }, + "flavor": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "cpu": schema.Int64Attribute{ + Required: true, + }, + "ram": schema.Int64Attribute{ + Required: true, + }, + }, + }, + "replicas": schema.Int64Attribute{ + Required: true, + }, + "storage": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "class": schema.StringAttribute{ + Required: true, + }, + "size": schema.Int64Attribute{ + Required: true, + }, + }, + }, + "version": schema.StringAttribute{ + Required: true, + }, + "options": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "type": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + } +} + +// Create creates the resource and sets the initial Terraform state. 
+func (r *instanceResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + + var acl []string + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + err := loadFlavorId(ctx, r.client, &model, flavor) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Loading flavor ID: %v", err)) + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + var options = &optionsModel{} + if !(model.Options.IsNull() || model.Options.IsUnknown()) { + diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toCreatePayload(&model, acl, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Create new instance + createResp, err := r.client.CreateInstance(ctx, projectId).CreateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Calling API: %v", err)) + return + } + instanceId := *createResp.Id + ctx = tflog.SetField(ctx, "instance_id", instanceId) + wr, err := wait.CreateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Instance creation waiting: %v", err)) + return + } + got, ok := wr.(*mongodbflex.GetInstanceResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Wait result conversion, got %+v", wr)) + return + } + + // Map response body to schema + err = mapFields(got, &model, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + // Set state to fully populated data + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "MongoDB Flex instance created") +} + +// Read refreshes the Terraform state with the latest data. +func (r *instanceResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { // nolint:gocritic // function signature required by Terraform + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + var options = &optionsModel{} + if !(model.Options.IsNull() || model.Options.IsUnknown()) { + diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + instanceResp, err := r.client.GetInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", err.Error()) + return + } + + // Map response body to schema + err = mapFields(instanceResp, &model, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error reading instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + // Set refreshed state + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "MongoDB Flex instance read") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *instanceResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from plan + var model Model + diags := req.Plan.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + var acl []string + if !(model.ACL.IsNull() || model.ACL.IsUnknown()) { + diags = model.ACL.ElementsAs(ctx, &acl, false) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + var flavor = &flavorModel{} + if !(model.Flavor.IsNull() || model.Flavor.IsUnknown()) { + diags = model.Flavor.As(ctx, flavor, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + err := loadFlavorId(ctx, r.client, &model, flavor) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Loading flavor ID: %v", err)) + return + } + } + var storage = &storageModel{} + if !(model.Storage.IsNull() || model.Storage.IsUnknown()) { + diags = model.Storage.As(ctx, storage, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + } + + var options = &optionsModel{} + if !(model.Options.IsNull() || model.Options.IsUnknown()) { + diags = model.Options.As(ctx, options, basetypes.ObjectAsOptions{}) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + } + + // Generate API request body from model + payload, err := toUpdatePayload(&model, acl, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Creating API payload: %v", err)) + return + } + // Update existing instance + _, err = r.client.PartialUpdateInstance(ctx, projectId, instanceId).PartialUpdateInstancePayload(*payload).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", err.Error()) + return + } + wr, err := wait.UpdateInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Instance update waiting: %v", err)) + return + } + got, ok := wr.(*mongodbflex.GetInstanceResponse) + if !ok { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Wait result conversion, got %+v", wr)) + return + } + + // Map response body to schema + err = mapFields(got, &model, flavor, storage, options) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err)) + return + } + diags = resp.State.Set(ctx, model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "MongoDB Flex instance updated") +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *instanceResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { // nolint:gocritic // function signature required by Terraform + // Retrieve values from state + var model Model + diags := req.State.Get(ctx, &model) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + projectId := model.ProjectId.ValueString() + instanceId := model.InstanceId.ValueString() + ctx = tflog.SetField(ctx, "project_id", projectId) + ctx = tflog.SetField(ctx, "instance_id", instanceId) + + // Delete existing instance + err := r.client.DeleteInstance(ctx, projectId, instanceId).Execute() + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Calling API: %v", err)) + return + } + _, err = wait.DeleteInstanceWaitHandler(ctx, r.client, projectId, instanceId).SetTimeout(15 * time.Minute).WaitWithContext(ctx) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error deleting instance", fmt.Sprintf("Instance deletion waiting: %v", err)) + return + } + tflog.Info(ctx, "MongoDB Flex instance deleted") +} + +// ImportState imports a resource into the Terraform state on success. +// The expected format of the resource import identifier is: project_id,instance_id +func (r *instanceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, core.Separator) + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + core.LogAndAddError(ctx, &resp.Diagnostics, + "Error importing instance", + fmt.Sprintf("Expected import identifier with format: [project_id],[instance_id] Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("instance_id"), idParts[1])...) 
+ tflog.Info(ctx, "MongoDB Flex instance state imported") +} + +func mapFields(resp *mongodbflex.GetInstanceResponse, model *Model, flavor *flavorModel, storage *storageModel, options *optionsModel) error { + if resp == nil { + return fmt.Errorf("response input is nil") + } + if resp.Item == nil { + return fmt.Errorf("no instance provided") + } + if model == nil { + return fmt.Errorf("model input is nil") + } + instance := resp.Item + + var instanceId string + if model.InstanceId.ValueString() != "" { + instanceId = model.InstanceId.ValueString() + } else if instance.Id != nil { + instanceId = *instance.Id + } else { + return fmt.Errorf("instance id not present") + } + + var aclList basetypes.ListValue + var diags diag.Diagnostics + if instance.Acl == nil || instance.Acl.Items == nil { + aclList = types.ListNull(types.StringType) + } else { + acl := []attr.Value{} + for _, ip := range *instance.Acl.Items { + acl = append(acl, types.StringValue(ip)) + } + aclList, diags = types.ListValue(types.StringType, acl) + if diags.HasError() { + return fmt.Errorf("mapping ACL: %w", core.DiagsToError(diags)) + } + } + + var flavorValues map[string]attr.Value + if instance.Flavor == nil { + flavorValues = map[string]attr.Value{ + "id": flavor.Id, + "description": flavor.Description, + "cpu": flavor.CPU, + "ram": flavor.RAM, + } + } else { + flavorValues = map[string]attr.Value{ + "id": types.StringValue(*instance.Flavor.Id), + "description": types.StringValue(*instance.Flavor.Description), + "cpu": conversion.ToTypeInt64(instance.Flavor.Cpu), + "ram": conversion.ToTypeInt64(instance.Flavor.Memory), + } + } + flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues) + if diags.HasError() { + return fmt.Errorf("creating flavor: %w", core.DiagsToError(diags)) + } + + var storageValues map[string]attr.Value + if instance.Storage == nil { + storageValues = map[string]attr.Value{ + "class": storage.Class, + "size": storage.Size, + } + } else { + storageValues = map[string]attr.Value{ + "class": types.StringValue(*instance.Storage.Class), + "size": conversion.ToTypeInt64(instance.Storage.Size), + } + } + storageObject, diags := types.ObjectValue(storageTypes, storageValues) + if diags.HasError() { + return fmt.Errorf("creating storage: %w", core.DiagsToError(diags)) + } + + var optionsValues map[string]attr.Value + if instance.Options == nil { + optionsValues = map[string]attr.Value{ + "type": options.Type, + } + } else { + optionsValues = map[string]attr.Value{ + "type": types.StringValue((*instance.Options)["type"]), + } + } + optionsObject, diags := types.ObjectValue(optionsTypes, optionsValues) + if diags.HasError() { + return fmt.Errorf("creating options: %w", core.DiagsToError(diags)) + } + + idParts := []string{ + model.ProjectId.ValueString(), + instanceId, + } + model.Id = types.StringValue( + strings.Join(idParts, core.Separator), + ) + model.InstanceId = types.StringValue(instanceId) + model.Name = types.StringPointerValue(instance.Name) + model.ACL = aclList + model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule) + model.Flavor = flavorObject + model.Replicas = conversion.ToTypeInt64(instance.Replicas) + model.Storage = storageObject + model.Version = types.StringPointerValue(instance.Version) + model.Options = optionsObject + return nil +} + +func toCreatePayload(model *Model, acl []string, flavor *flavorModel, storage *storageModel, options *optionsModel) (*mongodbflex.CreateInstancePayload, error) { + if model == nil { + return nil, fmt.Errorf("nil model") + } + if 
acl == nil {
+		return nil, fmt.Errorf("nil acl")
+	}
+	if flavor == nil {
+		return nil, fmt.Errorf("nil flavor")
+	}
+	if storage == nil {
+		return nil, fmt.Errorf("nil storage")
+	}
+	if options == nil {
+		return nil, fmt.Errorf("nil options")
+	}
+
+	payloadOptions := make(map[string]string)
+	if options.Type.ValueString() != "" {
+		payloadOptions["type"] = options.Type.ValueString()
+	}
+
+	return &mongodbflex.CreateInstancePayload{
+		Acl: &mongodbflex.InstanceAcl{
+			Items: &acl,
+		},
+		BackupSchedule: model.BackupSchedule.ValueStringPointer(),
+		FlavorId:       flavor.Id.ValueStringPointer(),
+		Name:           model.Name.ValueStringPointer(),
+		Replicas:       conversion.ToPtrInt32(model.Replicas),
+		Storage: &mongodbflex.InstanceStorage{
+			Class: storage.Class.ValueStringPointer(),
+			Size:  conversion.ToPtrInt32(storage.Size),
+		},
+		Version: model.Version.ValueStringPointer(),
+		Options: &payloadOptions,
+	}, nil
+}
+
+func toUpdatePayload(model *Model, acl []string, flavor *flavorModel, storage *storageModel, options *optionsModel) (*mongodbflex.PartialUpdateInstancePayload, error) {
+	if model == nil {
+		return nil, fmt.Errorf("nil model")
+	}
+	if acl == nil {
+		return nil, fmt.Errorf("nil acl")
+	}
+	if flavor == nil {
+		return nil, fmt.Errorf("nil flavor")
+	}
+	if storage == nil {
+		return nil, fmt.Errorf("nil storage")
+	}
+	if options == nil {
+		return nil, fmt.Errorf("nil options")
+	}
+
+	payloadOptions := make(map[string]string)
+	if options.Type.ValueString() != "" {
+		payloadOptions["type"] = options.Type.ValueString()
+	}
+
+	return &mongodbflex.PartialUpdateInstancePayload{
+		Acl: &mongodbflex.InstanceAcl{
+			Items: &acl,
+		},
+		BackupSchedule: model.BackupSchedule.ValueStringPointer(),
+		FlavorId:       flavor.Id.ValueStringPointer(),
+		Name:           model.Name.ValueStringPointer(),
+		Replicas:       conversion.ToPtrInt32(model.Replicas),
+		Storage: &mongodbflex.InstanceStorage{
+			Class: storage.Class.ValueStringPointer(),
+			Size:  conversion.ToPtrInt32(storage.Size),
+		},
+		Version: model.Version.ValueStringPointer(),
+		Options: &payloadOptions,
+	}, nil
+}
+
+type mongoDBFlexClient interface {
+	GetFlavorsExecute(ctx context.Context, projectId string) (*mongodbflex.GetFlavorsResponse, error)
+}
+
+func loadFlavorId(ctx context.Context, client mongoDBFlexClient, model *Model, flavor *flavorModel) error {
+	if model == nil {
+		return fmt.Errorf("nil model")
+	}
+	if flavor == nil {
+		return fmt.Errorf("nil flavor")
+	}
+	cpu := conversion.ToPtrInt32(flavor.CPU)
+	if cpu == nil {
+		return fmt.Errorf("nil CPU")
+	}
+	ram := conversion.ToPtrInt32(flavor.RAM)
+	if ram == nil {
+		return fmt.Errorf("nil RAM")
+	}
+
+	projectId := model.ProjectId.ValueString()
+	res, err := client.GetFlavorsExecute(ctx, projectId)
+	if err != nil {
+		return fmt.Errorf("listing mongodbflex flavors: %w", err)
+	}
+
+	avl := ""
+	if res.Flavors == nil {
+		return fmt.Errorf("finding flavors for project %s", projectId)
+	}
+	for _, f := range *res.Flavors {
+		if f.Id == nil || f.Cpu == nil || f.Memory == nil {
+			continue
+		}
+		if *f.Cpu == *cpu && *f.Memory == *ram {
+			flavor.Id = types.StringValue(*f.Id)
+			flavor.Description = types.StringValue(*f.Description)
+			break
+		}
+		avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Memory)
+	}
+	if flavor.Id.ValueString() == "" {
+		return fmt.Errorf("couldn't find flavor, available specs are:%s", avl)
+	}
+
+	return nil
+}
diff --git a/stackit/internal/services/mongodbflex/instance/resource_test.go b/stackit/internal/services/mongodbflex/instance/resource_test.go
new file mode 100644
index 
000000000..6008d2223 --- /dev/null +++ b/stackit/internal/services/mongodbflex/instance/resource_test.go @@ -0,0 +1,750 @@ +package mongodbflex + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + "github.com/stackitcloud/stackit-sdk-go/services/mongodbflex" +) + +type mongoDBFlexClientMocked struct { + returnError bool + getFlavorsResp *mongodbflex.GetFlavorsResponse +} + +func (c *mongoDBFlexClientMocked) GetFlavorsExecute(_ context.Context, _ string) (*mongodbflex.GetFlavorsResponse, error) { + if c.returnError { + return nil, fmt.Errorf("get flavors failed") + } + + return c.getFlavorsResp, nil +} + +func TestMapFields(t *testing.T) { + tests := []struct { + description string + input *mongodbflex.GetInstanceResponse + flavor *flavorModel + storage *storageModel + options *optionsModel + expected Model + isValid bool + }{ + { + "default_values", + &mongodbflex.GetInstanceResponse{ + Item: &mongodbflex.InstanceSingleInstance{}, + }, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringNull(), + ACL: types.ListNull(types.StringType), + BackupSchedule: types.StringNull(), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringNull(), + "description": types.StringNull(), + "cpu": types.Int64Null(), + "ram": types.Int64Null(), + }), + Replicas: types.Int64Null(), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringNull(), + "size": types.Int64Null(), + }), + Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{ + "type": types.StringNull(), + }), + Version: types.StringNull(), + }, + true, + }, + { + "simple_values", + &mongodbflex.GetInstanceResponse{ + Item: &mongodbflex.InstanceSingleInstance{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "ip1", + "ip2", + "", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + Flavor: &mongodbflex.InstanceFlavor{ + Cpu: utils.Ptr(int32(12)), + Description: utils.Ptr("description"), + Id: utils.Ptr("flavor_id"), + Memory: utils.Ptr(int32(34)), + }, + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(56)), + Status: utils.Ptr("status"), + Storage: &mongodbflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(78)), + }, + Options: &map[string]string{ + "type": "type", + }, + Version: utils.Ptr("version"), + }, + }, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + ACL: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ip1"), + types.StringValue("ip2"), + types.StringValue(""), + }), + BackupSchedule: types.StringValue("schedule"), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringValue("flavor_id"), + "description": types.StringValue("description"), + "cpu": types.Int64Value(12), + "ram": types.Int64Value(34), + }), + Replicas: types.Int64Value(56), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringValue("class"), + "size": types.Int64Value(78), + }), + Options: types.ObjectValueMust(optionsTypes, 
map[string]attr.Value{ + "type": types.StringValue("type"), + }), + Version: types.StringValue("version"), + }, + true, + }, + { + "simple_values_no_flavor_and_storage", + &mongodbflex.GetInstanceResponse{ + Item: &mongodbflex.InstanceSingleInstance{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "ip1", + "ip2", + "", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + Flavor: nil, + Id: utils.Ptr("iid"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(56)), + Status: utils.Ptr("status"), + Storage: nil, + Options: &map[string]string{ + "type": "type", + }, + Version: utils.Ptr("version"), + }, + }, + &flavorModel{ + CPU: types.Int64Value(12), + RAM: types.Int64Value(34), + }, + &storageModel{ + Class: types.StringValue("class"), + Size: types.Int64Value(78), + }, + &optionsModel{ + Type: types.StringValue("type"), + }, + Model{ + Id: types.StringValue("pid,iid"), + InstanceId: types.StringValue("iid"), + ProjectId: types.StringValue("pid"), + Name: types.StringValue("name"), + ACL: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("ip1"), + types.StringValue("ip2"), + types.StringValue(""), + }), + BackupSchedule: types.StringValue("schedule"), + Flavor: types.ObjectValueMust(flavorTypes, map[string]attr.Value{ + "id": types.StringNull(), + "description": types.StringNull(), + "cpu": types.Int64Value(12), + "ram": types.Int64Value(34), + }), + Replicas: types.Int64Value(56), + Storage: types.ObjectValueMust(storageTypes, map[string]attr.Value{ + "class": types.StringValue("class"), + "size": types.Int64Value(78), + }), + Options: types.ObjectValueMust(optionsTypes, map[string]attr.Value{ + "type": types.StringValue("type"), + }), + Version: types.StringValue("version"), + }, + true, + }, + { + "nil_response", + nil, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + Model{}, + false, + }, + { + "no_resource_id", + &mongodbflex.GetInstanceResponse{}, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + Model{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + state := &Model{ + ProjectId: tt.expected.ProjectId, + InstanceId: tt.expected.InstanceId, + } + err := mapFields(tt.input, state, tt.flavor, tt.storage, tt.options) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(state, &tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToCreatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputAcl []string + inputFlavor *flavorModel + inputStorage *storageModel + inputOptions *optionsModel + expected *mongodbflex.CreateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + &mongodbflex.CreateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{}, + }, + Storage: &mongodbflex.InstanceStorage{}, + Options: &map[string]string{}, + }, + true, + }, + { + "simple_values", + &Model{ + BackupSchedule: types.StringValue("schedule"), + Name: types.StringValue("name"), + Replicas: types.Int64Value(12), + Version: types.StringValue("version"), + }, + []string{ + "ip_1", + "ip_2", + }, + &flavorModel{ + Id: types.StringValue("flavor_id"), + }, + &storageModel{ + Class: types.StringValue("class"), + Size: types.Int64Value(34), + }, + &optionsModel{ + Type: types.StringValue("type"), + 
}, + &mongodbflex.CreateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "ip_1", + "ip_2", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + FlavorId: utils.Ptr("flavor_id"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(12)), + Storage: &mongodbflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(34)), + }, + Options: &map[string]string{"type": "type"}, + Version: utils.Ptr("version"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + BackupSchedule: types.StringNull(), + Name: types.StringNull(), + Replicas: types.Int64Value(2123456789), + Version: types.StringNull(), + }, + []string{ + "", + }, + &flavorModel{ + Id: types.StringNull(), + }, + &storageModel{ + Class: types.StringNull(), + Size: types.Int64Null(), + }, + &optionsModel{ + Type: types.StringNull(), + }, + &mongodbflex.CreateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "", + }, + }, + BackupSchedule: nil, + FlavorId: nil, + Name: nil, + Replicas: utils.Ptr(int32(2123456789)), + Storage: &mongodbflex.InstanceStorage{ + Class: nil, + Size: nil, + }, + Options: &map[string]string{}, + Version: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_acl", + &Model{}, + nil, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_flavor", + &Model{}, + []string{}, + nil, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_storage", + &Model{}, + []string{}, + &flavorModel{}, + nil, + &optionsModel{}, + nil, + false, + }, + { + "nil_options", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toCreatePayload(tt.input, tt.inputAcl, tt.inputFlavor, tt.inputStorage, tt.inputOptions) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestToUpdatePayload(t *testing.T) { + tests := []struct { + description string + input *Model + inputAcl []string + inputFlavor *flavorModel + inputStorage *storageModel + inputOptions *optionsModel + expected *mongodbflex.PartialUpdateInstancePayload + isValid bool + }{ + { + "default_values", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + &mongodbflex.PartialUpdateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{}, + }, + Storage: &mongodbflex.InstanceStorage{}, + Options: &map[string]string{}, + }, + true, + }, + { + "simple_values", + &Model{ + BackupSchedule: types.StringValue("schedule"), + Name: types.StringValue("name"), + Replicas: types.Int64Value(12), + Version: types.StringValue("version"), + }, + []string{ + "ip_1", + "ip_2", + }, + &flavorModel{ + Id: types.StringValue("flavor_id"), + }, + &storageModel{ + Class: types.StringValue("class"), + Size: types.Int64Value(34), + }, + &optionsModel{ + Type: types.StringValue("type"), + }, + &mongodbflex.PartialUpdateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "ip_1", + "ip_2", + }, + }, + BackupSchedule: utils.Ptr("schedule"), + FlavorId: utils.Ptr("flavor_id"), + Name: utils.Ptr("name"), + Replicas: utils.Ptr(int32(12)), + Storage: 
&mongodbflex.InstanceStorage{ + Class: utils.Ptr("class"), + Size: utils.Ptr(int32(34)), + }, + Options: &map[string]string{"type": "type"}, + Version: utils.Ptr("version"), + }, + true, + }, + { + "null_fields_and_int_conversions", + &Model{ + BackupSchedule: types.StringNull(), + Name: types.StringNull(), + Replicas: types.Int64Value(2123456789), + Version: types.StringNull(), + }, + []string{ + "", + }, + &flavorModel{ + Id: types.StringNull(), + }, + &storageModel{ + Class: types.StringNull(), + Size: types.Int64Null(), + }, + &optionsModel{ + Type: types.StringNull(), + }, + &mongodbflex.PartialUpdateInstancePayload{ + Acl: &mongodbflex.InstanceAcl{ + Items: &[]string{ + "", + }, + }, + BackupSchedule: nil, + FlavorId: nil, + Name: nil, + Replicas: utils.Ptr(int32(2123456789)), + Storage: &mongodbflex.InstanceStorage{ + Class: nil, + Size: nil, + }, + Options: &map[string]string{}, + Version: nil, + }, + true, + }, + { + "nil_model", + nil, + []string{}, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_acl", + &Model{}, + nil, + &flavorModel{}, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_flavor", + &Model{}, + []string{}, + nil, + &storageModel{}, + &optionsModel{}, + nil, + false, + }, + { + "nil_storage", + &Model{}, + []string{}, + &flavorModel{}, + nil, + &optionsModel{}, + nil, + false, + }, + { + "nil_options", + &Model{}, + []string{}, + &flavorModel{}, + &storageModel{}, + nil, + nil, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + output, err := toUpdatePayload(tt.input, tt.inputAcl, tt.inputFlavor, tt.inputStorage, tt.inputOptions) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(output, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} + +func TestLoadFlavorId(t *testing.T) { + tests := []struct { + description string + inputFlavor *flavorModel + mockedResp *mongodbflex.GetFlavorsResponse + expected *flavorModel + getFlavorsFails bool + isValid bool + }{ + { + "ok_flavor", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &mongodbflex.GetFlavorsResponse{ + Flavors: &[]mongodbflex.HandlersInfraFlavor{ + { + Id: utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(2)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + }, + }, + &flavorModel{ + Id: types.StringValue("fid-1"), + Description: types.StringValue("description"), + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + true, + }, + { + "ok_flavor_2", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &mongodbflex.GetFlavorsResponse{ + Flavors: &[]mongodbflex.HandlersInfraFlavor{ + { + Id: utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(2)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + { + Id: utils.Ptr("fid-2"), + Cpu: utils.Ptr(int32(1)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(4)), + }, + }, + }, + &flavorModel{ + Id: types.StringValue("fid-1"), + Description: types.StringValue("description"), + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + true, + }, + { + "no_matching_flavor", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &mongodbflex.GetFlavorsResponse{ + Flavors: &[]mongodbflex.HandlersInfraFlavor{ + { + Id: 
utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(1)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + { + Id: utils.Ptr("fid-2"), + Cpu: utils.Ptr(int32(1)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(4)), + }, + }, + }, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + false, + }, + { + "nil_response", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &mongodbflex.GetFlavorsResponse{}, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + false, + }, + { + "error_response", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &mongodbflex.GetFlavorsResponse{}, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + true, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + client := &mongoDBFlexClientMocked{ + returnError: tt.getFlavorsFails, + getFlavorsResp: tt.mockedResp, + } + model := &Model{ + ProjectId: types.StringValue("pid"), + } + flavorModel := &flavorModel{ + CPU: tt.inputFlavor.CPU, + RAM: tt.inputFlavor.RAM, + } + err := loadFlavorId(context.Background(), client, model, flavorModel) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(flavorModel, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/internal/services/mongodbflex/mongodbflex_acc_test.go b/stackit/internal/services/mongodbflex/mongodbflex_acc_test.go new file mode 100644 index 000000000..bc60878a9 --- /dev/null +++ b/stackit/internal/services/mongodbflex/mongodbflex_acc_test.go @@ -0,0 +1,227 @@ +package mongodbflex_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stackitcloud/stackit-sdk-go/core/utils" + + "github.com/stackitcloud/stackit-sdk-go/core/config" + "github.com/stackitcloud/stackit-sdk-go/services/mongodbflex" + "github.com/stackitcloud/stackit-sdk-go/services/mongodbflex/wait" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/core" + "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/testutil" +) + +// Instance resource data +var instanceResource = map[string]string{ + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), + "acl": "192.168.0.0/16", + "flavor_cpu": "2", + "flavor_ram": "4", + "flavor_description": "Small, Compute optimized", + "replicas": "1", + "storage_class": "premium-perf2-mongodb", + "storage_size": "10", + "version": "5.0", + "version_updated": "6.0", + "options_type": "Single", + "flavor_id": "2.4", +} + +func configResources(version string) string { + return fmt.Sprintf(` + %s + + resource "stackit_mongodbflex_instance" "instance" { + project_id = "%s" + name = "%s" + acl = ["%s"] + flavor = { + cpu = %s + ram = %s + } + replicas = %s + storage = { + class = "%s" + size = %s + } + version = "%s" + options = { + type = "%s" + } + } + `, + testutil.MongoDBFlexProviderConfig(), + instanceResource["project_id"], + instanceResource["name"], + instanceResource["acl"], + instanceResource["flavor_cpu"], + 
instanceResource["flavor_ram"], + instanceResource["replicas"], + instanceResource["storage_class"], + instanceResource["storage_size"], + version, + instanceResource["options_type"], + ) +} + +func TestAccMongoDBFlexFlexResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testutil.TestAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckMongoDBFlexDestroy, + Steps: []resource.TestStep{ + // Creation + { + Config: configResources(instanceResource["version"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "flavor.id"), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "flavor.description"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "replicas", instanceResource["replicas"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "storage.class", instanceResource["storage_class"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "storage.size", instanceResource["storage_size"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "version", instanceResource["version"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "options.type", instanceResource["options_type"]), + ), + }, + // data source + { + Config: fmt.Sprintf(` + %s + + data "stackit_mongodbflex_instance" "instance" { + project_id = stackit_mongodbflex_instance.instance.project_id + instance_id = stackit_mongodbflex_instance.instance.instance_id + } + `, + configResources(instanceResource["version"]), + ), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttrPair( + "data.stackit_mongodbflex_instance.instance", "project_id", + "stackit_mongodbflex_instance.instance", "project_id", + ), + resource.TestCheckResourceAttrPair( + "data.stackit_mongodbflex_instance.instance", "instance_id", + "stackit_mongodbflex_instance.instance", "instance_id", + ), + + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "flavor.id", instanceResource["flavor_id"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "flavor.description", instanceResource["flavor_description"]), + 
resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "replicas", instanceResource["replicas"]), + resource.TestCheckResourceAttr("data.stackit_mongodbflex_instance.instance", "options.type", instanceResource["options_type"]), + ), + }, + // Import + { + ResourceName: "stackit_mongodbflex_instance.instance", + ImportStateIdFunc: func(s *terraform.State) (string, error) { + r, ok := s.RootModule().Resources["stackit_mongodbflex_instance.instance"] + if !ok { + return "", fmt.Errorf("couldn't find resource stackit_mongodbflex_instance.instance") + } + instanceId, ok := r.Primary.Attributes["instance_id"] + if !ok { + return "", fmt.Errorf("couldn't find attribute instance_id") + } + + return fmt.Sprintf("%s,%s", testutil.ProjectId, instanceId), nil + }, + ImportState: true, + ImportStateVerify: true, + }, + // Update + { + Config: configResources(instanceResource["version_updated"]), + Check: resource.ComposeAggregateTestCheckFunc( + // Instance data + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "project_id", instanceResource["project_id"]), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "instance_id"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "name", instanceResource["name"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "acl.#", "1"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "acl.0", instanceResource["acl"]), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "flavor.id"), + resource.TestCheckResourceAttrSet("stackit_mongodbflex_instance.instance", "flavor.description"), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "flavor.ram", instanceResource["flavor_ram"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "replicas", instanceResource["replicas"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "storage.class", instanceResource["storage_class"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "storage.size", instanceResource["storage_size"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "version", instanceResource["version_updated"]), + resource.TestCheckResourceAttr("stackit_mongodbflex_instance.instance", "options.type", instanceResource["options_type"]), + ), + }, + // Deletion is done by the framework implicitly + }, + }) +} + +func testAccCheckMongoDBFlexDestroy(s *terraform.State) error { + ctx := context.Background() + var client *mongodbflex.APIClient + var err error + if testutil.MongoDBFlexCustomEndpoint == "" { + client, err = mongodbflex.NewAPIClient() + } else { + client, err = mongodbflex.NewAPIClient( + config.WithEndpoint(testutil.MongoDBFlexCustomEndpoint), + ) + } + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + instancesToDestroy := []string{} + for _, rs := range s.RootModule().Resources { + if rs.Type != "stackit_mongodbflex_instance" { + continue + } + // instance terraform ID: = "[project_id],[instance_id]" + instanceId := 
strings.Split(rs.Primary.ID, core.Separator)[1] + instancesToDestroy = append(instancesToDestroy, instanceId) + } + + instancesResp, err := client.GetInstances(ctx, testutil.ProjectId).Tag("").Execute() + if err != nil { + return fmt.Errorf("getting instancesResp: %w", err) + } + + items := *instancesResp.Items + for i := range items { + if items[i].Id == nil { + continue + } + if utils.Contains(instancesToDestroy, *items[i].Id) { + err := client.DeleteInstanceExecute(ctx, testutil.ProjectId, *items[i].Id) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: %w", *items[i].Id, err) + } + _, err = wait.DeleteInstanceWaitHandler(ctx, client, testutil.ProjectId, *items[i].Id).WaitWithContext(ctx) + if err != nil { + return fmt.Errorf("destroying instance %s during CheckDestroy: waiting for deletion %w", *items[i].Id, err) + } + } + } + return nil +} diff --git a/stackit/internal/services/postgresflex/instance/resource.go b/stackit/internal/services/postgresflex/instance/resource.go index 4ebdb70ed..fe9266628 100644 --- a/stackit/internal/services/postgresflex/instance/resource.go +++ b/stackit/internal/services/postgresflex/instance/resource.go @@ -64,7 +64,7 @@ var flavorTypes = map[string]attr.Type{ "ram": basetypes.Int64Type{}, } -// Struct corresponding to DataSourceModel.Storage +// Struct corresponding to Model.Storage type storageModel struct { Class types.String `tfsdk:"class"` Size types.Int64 `tfsdk:"size"` @@ -261,8 +261,9 @@ func (r *instanceResource) Create(ctx context.Context, req resource.CreateReques if resp.Diagnostics.HasError() { return } - r.loadFlavorId(ctx, &resp.Diagnostics, &model, flavor) - if resp.Diagnostics.HasError() { + err := loadFlavorId(ctx, r.client, &model, flavor) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error creating instance", fmt.Sprintf("Loading flavor ID: %v", err)) return } } @@ -395,8 +396,9 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques if resp.Diagnostics.HasError() { return } - r.loadFlavorId(ctx, &resp.Diagnostics, &model, flavor) - if resp.Diagnostics.HasError() { + err := loadFlavorId(ctx, r.client, &model, flavor) + if err != nil { + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Loading flavor ID: %v", err)) return } } @@ -435,7 +437,7 @@ func (r *instanceResource) Update(ctx context.Context, req resource.UpdateReques // Map response body to schema err = mapFields(got, &model, flavor, storage) if err != nil { - core.LogAndAddError(ctx, &resp.Diagnostics, "Error mapping fields in update", err.Error()) + core.LogAndAddError(ctx, &resp.Diagnostics, "Error updating instance", fmt.Sprintf("Processing API payload: %v", err)) return } diags = resp.State.Set(ctx, model) @@ -524,15 +526,15 @@ func mapFields(resp *postgresflex.InstanceResponse, model *Model, flavor *flavor } aclList, diags = types.ListValue(types.StringType, acl) if diags.HasError() { - return fmt.Errorf("failed to map ACL: %w", core.DiagsToError(diags)) + return fmt.Errorf("mapping ACL: %w", core.DiagsToError(diags)) } } var flavorValues map[string]attr.Value if instance.Flavor == nil { flavorValues = map[string]attr.Value{ - "id": types.StringNull(), - "description": types.StringNull(), + "id": flavor.Id, + "description": flavor.Description, "cpu": flavor.CPU, "ram": flavor.RAM, } @@ -540,13 +542,13 @@ func mapFields(resp *postgresflex.InstanceResponse, model *Model, flavor *flavor flavorValues = map[string]attr.Value{ "id": 
types.StringValue(*instance.Flavor.Id), "description": types.StringValue(*instance.Flavor.Description), - "cpu": types.Int64Value(int64(*instance.Flavor.Cpu)), - "ram": types.Int64Value(int64(*instance.Flavor.Memory)), + "cpu": conversion.ToTypeInt64(instance.Flavor.Cpu), + "ram": conversion.ToTypeInt64(instance.Flavor.Memory), } } flavorObject, diags := types.ObjectValue(flavorTypes, flavorValues) if diags.HasError() { - return fmt.Errorf("failed to create flavor: %w", core.DiagsToError(diags)) + return fmt.Errorf("creating flavor: %w", core.DiagsToError(diags)) } var storageValues map[string]attr.Value @@ -558,12 +560,12 @@ func mapFields(resp *postgresflex.InstanceResponse, model *Model, flavor *flavor } else { storageValues = map[string]attr.Value{ "class": types.StringValue(*instance.Storage.Class), - "size": types.Int64Value(int64(*instance.Storage.Size)), + "size": conversion.ToTypeInt64(instance.Storage.Size), } } storageObject, diags := types.ObjectValue(storageTypes, storageValues) if diags.HasError() { - return fmt.Errorf("failed to create storage: %w", core.DiagsToError(diags)) + return fmt.Errorf("creating storage: %w", core.DiagsToError(diags)) } idParts := []string{ @@ -574,29 +576,13 @@ func mapFields(resp *postgresflex.InstanceResponse, model *Model, flavor *flavor strings.Join(idParts, core.Separator), ) model.InstanceId = types.StringValue(instanceId) - if instance.Name == nil { - model.Name = types.StringNull() - } else { - model.Name = types.StringValue(*instance.Name) - } + model.Name = types.StringPointerValue(instance.Name) model.ACL = aclList - if instance.BackupSchedule == nil { - model.BackupSchedule = types.StringNull() - } else { - model.BackupSchedule = types.StringValue(*instance.BackupSchedule) - } + model.BackupSchedule = types.StringPointerValue(instance.BackupSchedule) model.Flavor = flavorObject - if instance.Replicas == nil { - model.Replicas = types.Int64Null() - } else { - model.Replicas = types.Int64Value(int64(*instance.Replicas)) - } + model.Replicas = conversion.ToTypeInt64(instance.Replicas) model.Storage = storageObject - if instance.Version == nil { - model.Version = types.StringNull() - } else { - model.Version = types.StringValue(*instance.Version) - } + model.Version = types.StringPointerValue(instance.Version) return nil } @@ -660,37 +646,35 @@ func toUpdatePayload(model *Model, acl []string, flavor *flavorModel, storage *s }, nil } -func (r *instanceResource) loadFlavorId(ctx context.Context, diags *diag.Diagnostics, model *Model, flavor *flavorModel) { +type postgresFlexClient interface { + GetFlavorsExecute(ctx context.Context, projectId string) (*postgresflex.FlavorsResponse, error) +} + +func loadFlavorId(ctx context.Context, client postgresFlexClient, model *Model, flavor *flavorModel) error { if model == nil { - diags.AddError("invalid model", "nil model") - return + return fmt.Errorf("nil model") } if flavor == nil { - diags.AddError("invalid flavor", "nil flavor") - return + return fmt.Errorf("nil flavor") } cpu := conversion.ToPtrInt32(flavor.CPU) if cpu == nil { - diags.AddError("invalid flavor", "nil CPU") - return + return fmt.Errorf("nil CPU") } ram := conversion.ToPtrInt32(flavor.RAM) if ram == nil { - diags.AddError("invalid flavor", "nil RAM") - return + return fmt.Errorf("nil RAM") } projectId := model.ProjectId.ValueString() - res, err := r.client.GetFlavors(ctx, projectId).Execute() + res, err := client.GetFlavorsExecute(ctx, projectId) if err != nil { - diags.AddError("failed to list postgresflex flavors", err.Error()) 
- return + return fmt.Errorf("listing postgresflex flavors: %w", err) } avl := "" if res.Flavors == nil { - diags.AddError("no flavors", fmt.Sprintf("couldn't find flavors for id %s", flavor.Id.ValueString())) - return + return fmt.Errorf("finding flavors for project %s", projectId) } for _, f := range *res.Flavors { if f.Id == nil || f.Cpu == nil || f.Memory == nil { @@ -698,12 +682,14 @@ func (r *instanceResource) loadFlavorId(ctx context.Context, diags *diag.Diagnos } if *f.Cpu == *cpu && *f.Memory == *ram { flavor.Id = types.StringValue(*f.Id) + flavor.Description = types.StringValue(*f.Description) break } avl = fmt.Sprintf("%s\n- %d CPU, %d GB RAM", avl, *f.Cpu, *f.Cpu) } if flavor.Id.ValueString() == "" { - diags.AddError("invalid flavor", fmt.Sprintf("couldn't find flavor.\navailable specs are:%s", avl)) - return + return fmt.Errorf("couldn't find flavor, available specs are:%s", avl) } + + return nil } diff --git a/stackit/internal/services/postgresflex/instance/resource_test.go b/stackit/internal/services/postgresflex/instance/resource_test.go index a70c9bb58..9b347cda3 100644 --- a/stackit/internal/services/postgresflex/instance/resource_test.go +++ b/stackit/internal/services/postgresflex/instance/resource_test.go @@ -1,6 +1,8 @@ package postgresflex import ( + "context" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -10,6 +12,19 @@ import ( "github.com/stackitcloud/stackit-sdk-go/services/postgresflex" ) +type postgresFlexClientMocked struct { + returnError bool + getFlavorsResp *postgresflex.FlavorsResponse +} + +func (c *postgresFlexClientMocked) GetFlavorsExecute(_ context.Context, _ string) (*postgresflex.FlavorsResponse, error) { + if c.returnError { + return nil, fmt.Errorf("get flavors failed") + } + + return c.getFlavorsResp, nil +} + func TestMapFields(t *testing.T) { tests := []struct { description string @@ -507,3 +522,156 @@ func TestToUpdatePayload(t *testing.T) { }) } } + +func TestLoadFlavorId(t *testing.T) { + tests := []struct { + description string + inputFlavor *flavorModel + mockedResp *postgresflex.FlavorsResponse + expected *flavorModel + getFlavorsFails bool + isValid bool + }{ + { + "ok_flavor", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &postgresflex.FlavorsResponse{ + Flavors: &[]postgresflex.InstanceFlavor{ + { + Id: utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(2)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + }, + }, + &flavorModel{ + Id: types.StringValue("fid-1"), + Description: types.StringValue("description"), + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + true, + }, + { + "ok_flavor_2", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &postgresflex.FlavorsResponse{ + Flavors: &[]postgresflex.InstanceFlavor{ + { + Id: utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(2)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + { + Id: utils.Ptr("fid-2"), + Cpu: utils.Ptr(int32(1)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(4)), + }, + }, + }, + &flavorModel{ + Id: types.StringValue("fid-1"), + Description: types.StringValue("description"), + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + true, + }, + { + "no_matching_flavor", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &postgresflex.FlavorsResponse{ + Flavors: &[]postgresflex.InstanceFlavor{ + { + Id: utils.Ptr("fid-1"), + Cpu: utils.Ptr(int32(1)), + Description: 
utils.Ptr("description"), + Memory: utils.Ptr(int32(8)), + }, + { + Id: utils.Ptr("fid-2"), + Cpu: utils.Ptr(int32(1)), + Description: utils.Ptr("description"), + Memory: utils.Ptr(int32(4)), + }, + }, + }, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + false, + }, + { + "nil_response", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &postgresflex.FlavorsResponse{}, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + false, + false, + }, + { + "error_response", + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + &postgresflex.FlavorsResponse{}, + &flavorModel{ + CPU: types.Int64Value(2), + RAM: types.Int64Value(8), + }, + true, + false, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + client := &postgresFlexClientMocked{ + returnError: tt.getFlavorsFails, + getFlavorsResp: tt.mockedResp, + } + model := &Model{ + ProjectId: types.StringValue("pid"), + } + flavorModel := &flavorModel{ + CPU: tt.inputFlavor.CPU, + RAM: tt.inputFlavor.RAM, + } + err := loadFlavorId(context.Background(), client, model, flavorModel) + if !tt.isValid && err == nil { + t.Fatalf("Should have failed") + } + if tt.isValid && err != nil { + t.Fatalf("Should not have failed: %v", err) + } + if tt.isValid { + diff := cmp.Diff(flavorModel, tt.expected) + if diff != "" { + t.Fatalf("Data does not match: %s", diff) + } + } + }) + } +} diff --git a/stackit/internal/services/postgresflex/postgresflex_acc_test.go b/stackit/internal/services/postgresflex/postgresflex_acc_test.go index 054bbee40..cf0916154 100644 --- a/stackit/internal/services/postgresflex/postgresflex_acc_test.go +++ b/stackit/internal/services/postgresflex/postgresflex_acc_test.go @@ -20,19 +20,19 @@ import ( // Instance resource data var instanceResource = map[string]string{ - "project_id": testutil.ProjectId, - "name": fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), - "acl": "192.168.0.0/16", - "backup_schedule": "00 16 * * *", - "backup_schedule_update": "00 12 * * *", - "flavor_cpu": "2", - "flavor_ram": "4", - "flavor_description": "Small, Compute optimized", - "replicas": "1", - "storage_class": "premium-perf12-stackit", - "storage_size": "5", - "version": "14", - "flavor_id": "2.4", + "project_id": testutil.ProjectId, + "name": fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(7, acctest.CharSetAlphaNum)), + "acl": "192.168.0.0/16", + "backup_schedule": "00 16 * * *", + "backup_schedule_updated": "00 12 * * *", + "flavor_cpu": "2", + "flavor_ram": "4", + "flavor_description": "Small, Compute optimized", + "replicas": "1", + "storage_class": "premium-perf12-stackit", + "storage_size": "5", + "version": "14", + "flavor_id": "2.4", } // User resource data @@ -42,7 +42,7 @@ var userResource = map[string]string{ "project_id": instanceResource["project_id"], } -func configResources() string { +func configResources(backupSchedule string) string { return fmt.Sprintf(` %s @@ -74,7 +74,7 @@ func configResources() string { instanceResource["project_id"], instanceResource["name"], instanceResource["acl"], - instanceResource["backup_schedule"], + backupSchedule, instanceResource["flavor_cpu"], instanceResource["flavor_ram"], instanceResource["replicas"], @@ -93,7 +93,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) { Steps: []resource.TestStep{ // Creation { - Config: configResources(), + Config: configResources(instanceResource["backup_schedule"]), Check: 
resource.ComposeAggregateTestCheckFunc( // Instance resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]), @@ -140,7 +140,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) { user_id = stackit_postgresflex_user.user.user_id } `, - configResources(), + configResources(instanceResource["backup_schedule"]), ), Check: resource.ComposeAggregateTestCheckFunc( // Instance data @@ -220,38 +220,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) { }, // Update { - Config: fmt.Sprintf(` - %s - - resource "stackit_postgresflex_instance" "instance" { - project_id = "%s" - name = "%s" - acl = ["%s"] - backup_schedule = "%s" - flavor = { - cpu = %s - ram = %s - } - replicas = %s - storage = { - class = "%s" - size = %s - } - version = "%s" - } - `, - testutil.PostgresFlexProviderConfig(), - instanceResource["project_id"], - instanceResource["name"], - instanceResource["acl"], - instanceResource["backup_schedule_update"], - instanceResource["flavor_cpu"], - instanceResource["flavor_ram"], - instanceResource["replicas"], - instanceResource["storage_class"], - instanceResource["storage_size"], - instanceResource["version"], - ), + Config: configResources(instanceResource["backup_schedule_updated"]), Check: resource.ComposeAggregateTestCheckFunc( // Instance data resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "project_id", instanceResource["project_id"]), @@ -259,7 +228,7 @@ func TestAccPostgresFlexFlexResource(t *testing.T) { resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "name", instanceResource["name"]), resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.#", "1"), resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "acl.0", instanceResource["acl"]), - resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "backup_schedule", instanceResource["backup_schedule_update"]), + resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "backup_schedule", instanceResource["backup_schedule_updated"]), resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.id"), resource.TestCheckResourceAttrSet("stackit_postgresflex_instance.instance", "flavor.description"), resource.TestCheckResourceAttr("stackit_postgresflex_instance.instance", "flavor.cpu", instanceResource["flavor_cpu"]), diff --git a/stackit/internal/testutil/testutil.go b/stackit/internal/testutil/testutil.go index c4c3602f7..a6027e058 100644 --- a/stackit/internal/testutil/testutil.go +++ b/stackit/internal/testutil/testutil.go @@ -39,6 +39,7 @@ var ( DnsCustomEndpoint = os.Getenv("TF_ACC_DNS_CUSTOM_ENDPOINT") LogMeCustomEndpoint = os.Getenv("TF_ACC_LOGME_CUSTOM_ENDPOINT") MariaDBCustomEndpoint = os.Getenv("TF_ACC_MARIADB_CUSTOM_ENDPOINT") + MongoDBFlexCustomEndpoint = os.Getenv("TF_ACC_MONGODBFLEX_CUSTOM_ENDPOINT") OpenSearchCustomEndpoint = os.Getenv("TF_ACC_OPENSEARCH_CUSTOM_ENDPOINT") ObjectStorageCustomEndpoint = os.Getenv("TF_ACC_OBJECTSTORAGE_CUSTOM_ENDPOINT") PostgreSQLCustomEndpoint = os.Getenv("TF_ACC_POSTGRESQL_CUSTOM_ENDPOINT") @@ -106,6 +107,21 @@ func MariaDBProviderConfig() string { ) } +func MongoDBFlexProviderConfig() string { + if MongoDBFlexCustomEndpoint == "" { + return ` + provider "stackit" { + region = "eu01" + }` + } + return fmt.Sprintf(` + provider "stackit" { + mongodbflex_custom_endpoint = "%s" + }`, + MongoDBFlexCustomEndpoint, + ) +} + func ObjectStorageProviderConfig() string { if 
ObjectStorageCustomEndpoint == "" { return ` diff --git a/stackit/provider.go b/stackit/provider.go index 2da1e8459..e2859e899 100644 --- a/stackit/provider.go +++ b/stackit/provider.go @@ -18,6 +18,7 @@ import ( logMeInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/logme/instance" mariaDBCredential "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/mariadb/credential" mariaDBInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/mariadb/instance" + mongoDBFlexInstance "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/mongodbflex/instance" objectStorageBucket "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/bucket" objecStorageCredential "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/credential" objecStorageCredentialsGroup "github.com/stackitcloud/terraform-provider-stackit/stackit/internal/services/objectstorage/credentialsgroup" @@ -78,6 +79,7 @@ type providerModel struct { DNSCustomEndpoint types.String `tfsdk:"dns_custom_endpoint"` PostgreSQLCustomEndpoint types.String `tfsdk:"postgresql_custom_endpoint"` PostgresFlexCustomEndpoint types.String `tfsdk:"postgresflex_custom_endpoint"` + MongoDBFlexCustomEndpoint types.String `tfsdk:"mongodbflex_custom_endpoint"` LogMeCustomEndpoint types.String `tfsdk:"logme_custom_endpoint"` RabbitMQCustomEndpoint types.String `tfsdk:"rabbitmq_custom_endpoint"` MariaDBCustomEndpoint types.String `tfsdk:"mariadb_custom_endpoint"` @@ -106,6 +108,7 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro "dns_custom_endpoint": "Custom endpoint for the DNS service", "postgresql_custom_endpoint": "Custom endpoint for the PostgreSQL service", "postgresflex_custom_endpoint": "Custom endpoint for the PostgresFlex service", + "mongodbflex_custom_endpoint": "Custom endpoint for the MongoDB Flex service", "logme_custom_endpoint": "Custom endpoint for the LogMe service", "rabbitmq_custom_endpoint": "Custom endpoint for the RabbitMQ service", "mariadb_custom_endpoint": "Custom endpoint for the MariaDB service", @@ -165,6 +168,10 @@ func (p *Provider) Schema(_ context.Context, _ provider.SchemaRequest, resp *pro Optional: true, Description: descriptions["postgresflex_custom_endpoint"], }, + "mongodbflex_custom_endpoint": schema.StringAttribute{ + Optional: true, + Description: descriptions["mongodbflex_custom_endpoint"], + }, "logme_custom_endpoint": schema.StringAttribute{ Optional: true, Description: descriptions["logme_custom_endpoint"], @@ -264,6 +271,9 @@ func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, if !(providerConfig.PostgresFlexCustomEndpoint.IsUnknown() || providerConfig.PostgresFlexCustomEndpoint.IsNull()) { providerData.PostgresFlexCustomEndpoint = providerConfig.PostgresFlexCustomEndpoint.ValueString() } + if !(providerConfig.MongoDBFlexCustomEndpoint.IsUnknown() || providerConfig.MongoDBFlexCustomEndpoint.IsNull()) { + providerData.MongoDBFlexCustomEndpoint = providerConfig.MongoDBFlexCustomEndpoint.ValueString() + } if !(providerConfig.LogMeCustomEndpoint.IsUnknown() || providerConfig.LogMeCustomEndpoint.IsNull()) { providerData.LogMeCustomEndpoint = providerConfig.LogMeCustomEndpoint.ValueString() } @@ -324,6 +334,7 @@ func (p *Provider) DataSources(_ context.Context) []func() datasource.DataSource logMeCredential.NewCredentialDataSource, mariaDBInstance.NewInstanceDataSource, 
mariaDBCredential.NewCredentialDataSource, + mongoDBFlexInstance.NewInstanceDataSource, objectStorageBucket.NewBucketDataSource, objecStorageCredentialsGroup.NewCredentialsGroupDataSource, objecStorageCredential.NewCredentialDataSource, @@ -355,6 +366,7 @@ func (p *Provider) Resources(_ context.Context) []func() resource.Resource { logMeCredential.NewCredentialResource, mariaDBInstance.NewInstanceResource, mariaDBCredential.NewCredentialResource, + mongoDBFlexInstance.NewInstanceResource, objectStorageBucket.NewBucketResource, objecStorageCredentialsGroup.NewCredentialsGroupResource, objecStorageCredential.NewCredentialResource,
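
For reference, a minimal usage sketch of how the new MongoDB Flex instance resource, data source, and provider endpoint override might be wired together. This is assembled from the acceptance-test template and provider schema in the changes above, not taken from shipped documentation; the project ID is a placeholder and the flavor/storage values simply mirror the test data.

provider "stackit" {
  region = "eu01"
  # Optional override, analogous to TF_ACC_MONGODBFLEX_CUSTOM_ENDPOINT in the acceptance tests:
  # mongodbflex_custom_endpoint = "<custom endpoint URL>"
}

resource "stackit_mongodbflex_instance" "example" {
  project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # placeholder project ID
  name       = "example-instance"
  acl        = ["192.168.0.0/16"]
  flavor = {
    cpu = 2
    ram = 4
  }
  replicas = 1
  storage = {
    class = "premium-perf2-mongodb"
    size  = 10
  }
  version = "5.0"
  options = {
    type = "Single"
  }
}

data "stackit_mongodbflex_instance" "example" {
  project_id  = stackit_mongodbflex_instance.example.project_id
  instance_id = stackit_mongodbflex_instance.example.instance_id
}

As the ImportStateIdFunc in the acceptance test suggests, an existing instance should be importable using an ID of the form "[project_id],[instance_id]".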