From 9ac990cab38778f3785307e061900eae888d4bc0 Mon Sep 17 00:00:00 2001
From: Anton Sidelnikov <53078276+anton-sidelnikov@users.noreply.github.com>
Date: Thu, 26 Jan 2023 14:29:46 +0100
Subject: [PATCH] [DWS] add `resource/opentelekomcloud_dws_cluster_v1.go` (#2061)

[DWS] add `resource/opentelekomcloud_dws_cluster_v1.go`

Summary of the Pull Request

New resource `opentelekomcloud_dws_cluster_v1`.

PR Checklist

- Tests added/passed.
- Documentation updated.
- Schema updated.
- Release notes added.

Acceptance Steps Performed

=== RUN   TestAccDwsClusterV1_basic
=== PAUSE TestAccDwsClusterV1_basic
=== CONT  TestAccDwsClusterV1_basic
--- PASS: TestAccDwsClusterV1_basic (702.69s)
PASS

Debugger finished with the exit code 0

Reviewed-by: Aloento
Reviewed-by: Vladimir Vshivkov
Reviewed-by: Artem Lifshits
Reviewed-by: Anton Sidelnikov
---
 docs/resources/dws_cluster_v1.md                            | 166 ++++++
 go.mod                                                      |   2 +-
 go.sum                                                      |   2 +
 ...ce_opentelekomcloud_dws_cluster_v1_test.go               | 154 ++++++
 opentelekomcloud/common/cfg/config.go                       |   7 +
 opentelekomcloud/provider.go                                |   2 +
 opentelekomcloud/services/dws/common.go                     |   5 +
 ...esource_opentelekomcloud_dws_cluster_v1.go               | 509 ++++++++++++++++++
 releasenotes/notes/dws-cluster-5bbc0e9d9d45f6f7.yaml        |   4 +
 9 files changed, 850 insertions(+), 1 deletion(-)
 create mode 100644 docs/resources/dws_cluster_v1.md
 create mode 100644 opentelekomcloud/acceptance/dws/resource_opentelekomcloud_dws_cluster_v1_test.go
 create mode 100644 opentelekomcloud/services/dws/common.go
 create mode 100644 opentelekomcloud/services/dws/resource_opentelekomcloud_dws_cluster_v1.go
 create mode 100644 releasenotes/notes/dws-cluster-5bbc0e9d9d45f6f7.yaml
diff --git a/docs/resources/dws_cluster_v1.md b/docs/resources/dws_cluster_v1.md
new file mode 100644
index 000000000..f941bad6a
--- /dev/null
+++ b/docs/resources/dws_cluster_v1.md
@@ -0,0 +1,166 @@
+---
+subcategory: "Data Warehouse Service (DWS)"
+---
+
+# opentelekomcloud_dws_cluster_v1
+
+Manages a cluster in the Data Warehouse Service (DWS).
+
+## Example Usage
+
+### DWS Cluster Example
+
+```hcl
+variable "availability_zone" {}
+variable "network_id" {}
+variable "vpc_id" {}
+variable "security_group_id" {}
+
+resource "opentelekomcloud_dws_cluster_v1" "cluster_1" {
+  name              = "my_dws_cluster"
+  user_name         = "dbadmin"
+  user_pwd          = "#dbadmin123"
+  node_type         = "dws.m3.xlarge"
+  number_of_node    = 3
+  network_id        = var.network_id
+  security_group_id = var.security_group_id
+  vpc_id            = var.vpc_id
+  availability_zone = var.availability_zone
+  port              = 8899
+
+  public_ip {
+    public_bind_type = "auto_assign"
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required, String, ForceNew) Cluster name. The name must be unique, contain 4 to 64 characters
+  consisting of letters, digits, hyphens (-), or underscores (_) only, and start with a letter.
+
+* `network_id` - (Required, String, ForceNew) Network ID, which is used for configuring cluster network.
+
+* `node_type` - (Required, String, ForceNew) Node type.
+
+* `number_of_node` - (Required, Int) Number of nodes in a cluster. The value ranges from 3 to 32. When expanding,
+  add at least 3 nodes.
+
+* `security_group_id` - (Required, String, ForceNew) ID of a security group, which is used for configuring cluster
+  network.
+
+* `user_name` - (Required, String, ForceNew) Administrator username for logging in to a data warehouse cluster.
+  The administrator username must:
+  * consist of lowercase letters, digits, or underscores;
+  * start with a lowercase letter or an underscore;
+  * contain 1 to 63 characters;
+  * not be a keyword of the DWS database.
+
+* `vpc_id` - (Required, String, ForceNew) VPC ID, which is used for configuring cluster network.
+
+* `user_pwd` - (Required, String) Administrator password for logging in to a data warehouse cluster.
+  The password must:
+  * contain 8 to 32 characters;
+  * not be the same as the username or the username written in reverse order;
+  * contain at least three of the following character types: lowercase letters, uppercase letters, digits,
+    and special characters (~!@#%^&*()-_=+|[{}];:,<.>/?).
+
+* `availability_zone` - (Optional, String, ForceNew) AZ of the cluster.
+
+* `port` - (Optional, Int, ForceNew) Service port of the cluster (8000 to 30000). The default value is 8000.
+
+* `public_ip` - (Optional, List, ForceNew) A nested object resource. Structure is documented below.
+
+* `number_of_cn` - (Optional, Int, ForceNew) Specifies the number of CN nodes. If you use a large-scale cluster,
+  deploy multiple CNs.
+
+* `keep_last_manual_snapshot` - (Optional, Int, ForceNew) The number of latest manual snapshots that need to be
+  retained for a cluster.
+
+The `public_ip` block supports:
+
+* `eip_id` - (Optional, String, ForceNew) EIP ID.
+
+* `public_bind_type` - (Optional, String, ForceNew) Binding type of an EIP. The value can be one of the following:
+  `auto_assign`, `not_use`, `bind_existing`. The default value is `not_use`.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `created` - Cluster creation time, in ISO8601 format: `YYYY-MM-DDThh:mm:ssZ`.
+
+* `endpoints` - Private network connection information about the cluster. Structure is documented below.
+
+* `id` - Cluster ID.
+
+* `public_endpoints` - Public network connection information about the cluster. If the value is not specified, the
+  public network connection information is not used by default. Structure is documented below.
+
+* `recent_event` - The recent event number.
+
+* `status` - Cluster status, which can be one of the following: `CREATING`, `AVAILABLE`, `UNAVAILABLE`,
+  `CREATION FAILED`.
+
+* `sub_status` - Sub-status of clusters in the `AVAILABLE` state. The value can be one of the following:
+  `NORMAL`, `READONLY`, `REDISTRIBUTING`, `REDISTRIBUTION-FAILURE`, `UNBALANCED`, `UNBALANCED | READONLY`,
+  `DEGRADED`, `DEGRADED | READONLY`, `DEGRADED | UNBALANCED`, `UNBALANCED | REDISTRIBUTING`,
+  `UNBALANCED | REDISTRIBUTION-FAILURE`, `READONLY | REDISTRIBUTION-FAILURE`,
+  `UNBALANCED | READONLY | REDISTRIBUTION-FAILURE`, `DEGRADED | REDISTRIBUTION-FAILURE`,
+  `DEGRADED | UNBALANCED | REDISTRIBUTION-FAILURE`, `DEGRADED | UNBALANCED | READONLY | REDISTRIBUTION-FAILURE`,
+  `DEGRADED | UNBALANCED | READONLY`.
+
+* `task_status` - Cluster management task. The value can be one of the following: `RESTORING`, `SNAPSHOTTING`,
+  `GROWING`, `REBOOTING`, `SETTING_CONFIGURATION`, `CONFIGURING_EXT_DATASOURCE`, `DELETING_EXT_DATASOURCE`,
+  `REBOOT_FAILURE`, `RESIZE_FAILURE`.
+
+* `updated` - Last modification time of the cluster, in ISO8601 format: `YYYY-MM-DDThh:mm:ssZ`.
+
+* `version` - Data warehouse version.
+
+* `private_ip` - List of private network IP addresses.
+
+The `endpoints` block contains:
+
+* `connect_info` - Private network connection information.
+
+* `jdbc_url` - JDBC URL. The default format is: `jdbc:postgresql://<connect_info>/`
+
+The `public_endpoints` block contains:
+
+* `jdbc_url` - JDBC URL. The default format is: `jdbc:postgresql://<public_connect_info>/`
+
+* `public_connect_info` - Public network connection information.
+
+## Timeouts
+
+This resource provides the following timeouts configuration options:
+
+* `create` - Default is 60 minutes.
+* `update` - Default is 60 minutes.
+* `delete` - Default is 60 minutes.
+
+## Import
+
+Cluster can be imported using the following format:
+
+```
+$ terraform import opentelekomcloud_dws_cluster_v1.cluster_1 4ca46bf1-5c61-48ff-b4f3-0ad4e5e3ba90
+```
+
+Note that the imported state may not be identical to your resource definition, because some attributes are missing
+from the API response for security or other reasons. The missing attributes include: `user_pwd`, `number_of_cn`.
+It is generally recommended to run `terraform plan` after importing a cluster.
+You can then decide if changes should be applied to the cluster, or if the resource definition
+should be updated to align with the cluster. You can also ignore changes as shown below.
+
+```
+resource "opentelekomcloud_dws_cluster_v1" "cluster_1" {
+  ...
+
+  lifecycle {
+    ignore_changes = [
+      user_pwd, number_of_cn,
+    ]
+  }
+}
+```
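Editorial note: the password rules documented above are enforced server-side by the DWS API at creation time. As a rough illustration only, a client-side pre-check of the documented constraints could look like the following sketch; `validateDwsPassword` is a hypothetical helper transcribed from the rules above, not part of this provider.

```go
package main

import (
	"fmt"
	"strings"
)

// validateDwsPassword is a hypothetical pre-check mirroring the documented
// DWS password rules; the authoritative validation happens server-side.
func validateDwsPassword(username, password string) error {
	if len(password) < 8 || len(password) > 32 {
		return fmt.Errorf("password must contain 8 to 32 characters")
	}
	// Reject the username and the username written in reverse order.
	reversed := []rune(username)
	for i, j := 0, len(reversed)-1; i < j; i, j = i+1, j-1 {
		reversed[i], reversed[j] = reversed[j], reversed[i]
	}
	if password == username || password == string(reversed) {
		return fmt.Errorf("password must not match the username or its reverse")
	}
	// Require at least three of the four documented character classes.
	classes := 0
	for _, set := range []string{
		"abcdefghijklmnopqrstuvwxyz",
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
		"0123456789",
		`~!@#%^&*()-_=+|[{}];:,<.>/?`,
	} {
		if strings.ContainsAny(password, set) {
			classes++
		}
	}
	if classes < 3 {
		return fmt.Errorf("password must contain at least three character types")
	}
	return nil
}

func main() {
	fmt.Println(validateDwsPassword("dbadmin", "#dbadmin123")) // <nil>
	fmt.Println(validateDwsPassword("dbadmin", "dbadmin"))     // too short
}
```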
diff --git a/go.mod b/go.mod
index 2242e6576..f02095991 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
 	github.com/jinzhu/copier v0.3.5
 	github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123135116-3a75ec2da837
+	github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123155616-6123409669db
 	github.com/unknwon/com v1.0.1
 	golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167
 	golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
diff --git a/go.sum b/go.sum
index 5405974dc..39e5e9bdc 100644
--- a/go.sum
+++ b/go.sum
@@ -209,6 +209,8 @@
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123135116-3a75ec2da837 h1:mxCr7nKsDMSwd8SLNItm4uG4PPjbGctWMvx0dIeIUUE=
 github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123135116-3a75ec2da837/go.mod h1:/QD0ZIzm3zMdE0iBSAP3+Z9eCViU2PgnQqp4KGrpR/M=
+github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123155616-6123409669db h1:rQQ7MA/ruBOwrHDIggH0RCK8UUsbLiNIbsd1ey6duAQ=
+github.com/opentelekomcloud/gophertelekomcloud v0.5.28-0.20230123155616-6123409669db/go.mod h1:/QD0ZIzm3zMdE0iBSAP3+Z9eCViU2PgnQqp4KGrpR/M=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/acceptance/common" + "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/acceptance/env" + "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/common/cfg" +) + +const resourceInstanceName = "opentelekomcloud_dws_cluster_v1.cluster_1" + +func TestAccDwsClusterV1_basic(t *testing.T) { + var cls cluster.ClusterDetail + var clusterName = fmt.Sprintf("dws_cluster_%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { common.TestAccPreCheck(t) }, + ProviderFactories: common.TestAccProviderFactories, + CheckDestroy: testAccCheckDwsV1ClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDwsV1ClusterBasic(clusterName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDwsV1ClusterExists(resourceInstanceName, &cls), + resource.TestCheckResourceAttr(resourceInstanceName, "name", clusterName), + resource.TestCheckResourceAttr(resourceInstanceName, "number_of_node", "3"), + ), + }, + { + Config: testAccDwsV1ClusterUpdated(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceInstanceName, "name", clusterName), + resource.TestCheckResourceAttr(resourceInstanceName, "number_of_node", "6"), + ), + }, + { + ResourceName: resourceInstanceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "user_pwd", "number_of_cn", + }, + }, + }, + }) +} + +func testAccCheckDwsV1ClusterDestroy(s *terraform.State) error { + config := common.TestAccProvider.Meta().(*cfg.Config) + client, err := config.DwsV1Client(env.OS_REGION_NAME) + if err != nil { + return fmt.Errorf("error creating DWSv1 client: %w", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "opentelekomcloud_dws_cluster_v1" { + continue + } + + _, err := cluster.ListClusterDetails(client, rs.Primary.ID) + if err == nil { + return fmt.Errorf("DWS cluster still exists") + } + } + return nil +} + +func testAccCheckDwsV1ClusterExists(n string, cls *cluster.ClusterDetail) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("no ID is set") + } + + config := common.TestAccProvider.Meta().(*cfg.Config) + dwsClient, err := config.DwsV1Client(env.OS_REGION_NAME) + if err != nil { + return fmt.Errorf("error creating DWSv1 client: %w", err) + } + + v, err := cluster.ListClusterDetails(dwsClient, rs.Primary.ID) + if err != nil { + return fmt.Errorf("error getting cluster (%s): %w", rs.Primary.ID, err) + } + + if v.Id != rs.Primary.ID { + return fmt.Errorf("DWS cluster not found") + } + *cls = *v + return nil + } +} + +func testAccDwsV1ClusterBasic(clusterName string) string { + return fmt.Sprintf(` +%s + +%s + +resource "opentelekomcloud_dws_cluster_v1" "cluster_1" { + name = "%s" + user_name = "dbadmin" + user_pwd = "#dbadmin123" + node_type = "dws.m3.xlarge" + number_of_node = 3 + network_id = data.opentelekomcloud_vpc_subnet_v1.shared_subnet.network_id + security_group_id = data.opentelekomcloud_networking_secgroup_v2.default_secgroup.id + vpc_id = data.opentelekomcloud_vpc_subnet_v1.shared_subnet.vpc_id + availability_zone = "%s" + port = 8899 + + public_ip { + public_bind_type = "auto_assign" + } +} +`, common.DataSourceSecGroupDefault, common.DataSourceSubnet, clusterName, env.OS_AVAILABILITY_ZONE) +} + +// 
diff --git a/opentelekomcloud/common/cfg/config.go b/opentelekomcloud/common/cfg/config.go
index 02fba9f2e..ed1fb5224 100644
--- a/opentelekomcloud/common/cfg/config.go
+++ b/opentelekomcloud/common/cfg/config.go
@@ -1008,6 +1008,13 @@ func (c *Config) VpcEpV1Client(region string) (*golangsdk.ServiceClient, error)
 	})
 }
 
+func (c *Config) DwsV1Client(region string) (*golangsdk.ServiceClient, error) {
+	return openstack.NewDWSV1(c.HwClient, golangsdk.EndpointOpts{
+		Region:       region,
+		Availability: c.getEndpointType(),
+	})
+}
+
 func reconfigProjectName(src Config, projectName ProjectName) (*Config, error) {
 	config := &Config{}
 	if err := copier.Copy(config, &src); err != nil {
diff --git a/opentelekomcloud/provider.go b/opentelekomcloud/provider.go
index 7a883971a..8fa6ba24d 100644
--- a/opentelekomcloud/provider.go
+++ b/opentelekomcloud/provider.go
@@ -21,6 +21,7 @@ import (
 	"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/deh"
 	"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/dms"
 	"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/dns"
+	"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/dws"
 	"github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/ecs"
 	elbv2 "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/elb/v2"
 	elbv3 "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/services/elb/v3"
@@ -350,6 +351,7 @@ func Provider() *schema.Provider {
 			"opentelekomcloud_dns_zone_v2":          dns.ResourceDNSZoneV2(),
 			"opentelekomcloud_dms_instance_v1":      dms.ResourceDmsInstancesV1(),
 			"opentelekomcloud_dms_topic_v1":         dms.ResourceDmsTopicsV1(),
+			"opentelekomcloud_dws_cluster_v1":       dws.ResourceDwsClusterV1(),
 			"opentelekomcloud_ecs_instance_v1":      ecs.ResourceEcsInstanceV1(),
 			"opentelekomcloud_evs_volume_v3":        evs.ResourceEvsStorageVolumeV3(),
 			"opentelekomcloud_fw_firewall_group_v2": fw.ResourceFWFirewallGroupV2(),
diff --git a/opentelekomcloud/services/dws/common.go b/opentelekomcloud/services/dws/common.go
new file mode 100644
index 000000000..754ee23e8
--- /dev/null
+++ b/opentelekomcloud/services/dws/common.go
@@ -0,0 +1,5 @@
+package dws
+
+const (
+	errCreationClient = "error creating OpenTelekomCloud DWSv1 client: %w"
+)
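Editorial note: the `DwsV1Client` getter added above is a thin wrapper around `openstack.NewDWSV1` from gophertelekomcloud, wrapped with the shared `errCreationClient` format. As a minimal sketch of using the same SDK calls outside Terraform (illustrative only: the auth option values are placeholders, and the `AuthOptions`/`AuthenticatedClient` usage is assumed to follow the standard gophertelekomcloud pattern):

```go
package main

import (
	"fmt"
	"log"

	golangsdk "github.com/opentelekomcloud/gophertelekomcloud"
	"github.com/opentelekomcloud/gophertelekomcloud/openstack"
	"github.com/opentelekomcloud/gophertelekomcloud/openstack/dws/v1/cluster"
)

func main() {
	// Authenticate against OTC IAM; all values here are placeholders.
	provider, err := openstack.AuthenticatedClient(golangsdk.AuthOptions{
		IdentityEndpoint: "https://iam.eu-de.otc.t-systems.com/v3",
		Username:         "user",
		Password:         "password",
		DomainName:       "OTC00000000000000000000",
		TenantName:       "eu-de",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Build a DWS v1 service client, as DwsV1Client does inside the provider.
	client, err := openstack.NewDWSV1(provider, golangsdk.EndpointOpts{Region: "eu-de"})
	if err != nil {
		log.Fatal(err)
	}

	// Fetch details of an existing cluster by ID (placeholder ID).
	detail, err := cluster.ListClusterDetails(client, "4ca46bf1-5c61-48ff-b4f3-0ad4e5e3ba90")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(detail.Name, detail.Status)
}
```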
"github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + golangsdk "github.com/opentelekomcloud/gophertelekomcloud" + "github.com/opentelekomcloud/gophertelekomcloud/openstack/dws/v1/cluster" + "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/common" + "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/common/cfg" + "github.com/opentelekomcloud/terraform-provider-opentelekomcloud/opentelekomcloud/common/fmterr" +) + +func ResourceDcsInstanceV1() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDwsClusterV1Create, + ReadContext: resourceDwsClusterV1Read, + UpdateContext: resourceDwsClusterV1Update, + DeleteContext: resourceDwsClusterV1Delete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(4, 64), + validation.StringMatch( + regexp.MustCompile(`^[\-_A-Za-z0-9]+$`), + "Only letters, digits, underscores (_), and hyphens (-) are allowed.", + ), + ), + }, + "user_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: common.ValidateName, + }, + "user_pwd": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "node_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "number_of_node": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 256), + }, + "network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "security_group_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "availability_zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(8000, 30000), + ForceNew: true, + Computed: true, + }, + "number_of_cn": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(2, 20), + Default: 3, + ForceNew: true, + }, + "keep_last_manual_snapshot": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "public_ip": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "public_bind_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + "auto_assign", "not_use", "bind_existing", + }, false), + }, + "eip_id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "created": { + Type: schema.TypeString, + Computed: true, + }, + "endpoints": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connect_info": { + Type: schema.TypeString, + 
+
+func resourceDwsClusterV1Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	config := meta.(*cfg.Config)
+	client, err := config.DwsV1Client(config.GetRegion(d))
+	if err != nil {
+		return fmterr.Errorf(errCreationClient, err)
+	}
+
+	createOpts := cluster.CreateClusterOpts{
+		NodeType:         d.Get("node_type").(string),
+		Name:             d.Get("name").(string),
+		NumberOfNode:     d.Get("number_of_node").(int),
+		SubnetId:         d.Get("network_id").(string),
+		SecurityGroupId:  d.Get("security_group_id").(string),
+		VpcId:            d.Get("vpc_id").(string),
+		AvailabilityZone: d.Get("availability_zone").(string),
+		Port:             d.Get("port").(int),
+		UserName:         d.Get("user_name").(string),
+		UserPwd:          d.Get("user_pwd").(string),
+		NumberOfCn:       d.Get("number_of_cn").(int),
+	}
+
+	if _, ok := d.GetOk("public_ip.0"); ok {
+		createOpts.PublicIp = cluster.PublicIp{
+			PublicBindType: d.Get("public_ip.0.public_bind_type").(string),
+			EipId:          d.Get("public_ip.0.eip_id").(string),
+		}
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	clusterID, err := cluster.CreateCluster(client, createOpts)
+	if err != nil {
+		return fmterr.Errorf("error creating DWS cluster: %w", err)
+	}
+	log.Printf("[INFO] cluster ID: %s", clusterID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"CREATING"},
+		Target:     []string{"AVAILABLE"},
+		Refresh:    dwsClusterV1StateRefreshFunc(client, clusterID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+	_, err = stateConf.WaitForStateContext(ctx)
+	if err != nil {
+		return fmterr.Errorf("error waiting for instance (%s) to become ready: %w", clusterID, err)
+	}
+
+	d.SetId(clusterID)
+
+	return resourceDwsClusterV1Read(ctx, d, meta)
+}
+
+func resourceDwsClusterV1Read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	config := meta.(*cfg.Config)
+	client, err := config.DwsV1Client(config.GetRegion(d))
+	if err != nil {
+		return fmterr.Errorf(errCreationClient, err)
+	}
+
+	v, err := cluster.ListClusterDetails(client, d.Id())
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	log.Printf("[DEBUG] DWS cluster %s: %+v", d.Id(), v)
+
+	mErr := multierror.Append(
+		d.Set("name", v.Name),
+		d.Set("network_id", v.SubnetId),
+		d.Set("node_type", v.NodeType),
+		d.Set("number_of_node", v.NumberOfNode),
+		d.Set("security_group_id", v.SecurityGroupId),
+		d.Set("user_name", v.UserName),
+		d.Set("vpc_id", v.VpcId),
+		d.Set("availability_zone", v.AvailabilityZone),
+		d.Set("port", v.Port),
+		d.Set("created", v.Created),
+		d.Set("recent_event", v.RecentEvent),
+		d.Set("status", v.Status),
+		d.Set("sub_status", v.SubStatus),
+		d.Set("task_status", v.TaskStatus),
+		d.Set("updated", v.Updated),
+		d.Set("version", v.Version),
+		d.Set("private_ip", v.PrivateIp),
+	)
+
+	if err := mErr.ErrorOrNil(); err != nil {
+		return diag.FromErr(err)
+	}
+
+	if v.PublicIp.EipId != "" {
+		value := []interface{}{map[string]string{
+			"eip_id":           v.PublicIp.EipId,
+			"public_bind_type": v.PublicIp.PublicBindType,
+		}}
+		if err := d.Set("public_ip", value); err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	if len(v.Endpoints) > 0 {
+		private := make([]interface{}, 0, len(v.Endpoints))
+		for _, endpoint := range v.Endpoints {
+			transformed := map[string]interface{}{
+				"connect_info": endpoint.ConnectInfo,
+				"jdbc_url":     endpoint.JdbcUrl,
+			}
+			private = append(private, transformed)
+		}
+		if err := d.Set("endpoints", private); err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	if len(v.PublicEndpoints) > 0 {
+		public := make([]interface{}, 0, len(v.PublicEndpoints))
+		for _, endpoint := range v.PublicEndpoints {
+			transformed := map[string]interface{}{
+				"public_connect_info": endpoint.PublicConnectInfo,
+				"jdbc_url":            endpoint.JdbcUrl,
+			}
+			public = append(public, transformed)
+		}
+		if err := d.Set("public_endpoints", public); err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	return nil
+}
+
+func resourceDwsClusterV1Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	config := meta.(*cfg.Config)
+	client, err := config.DwsV1Client(config.GetRegion(d))
+	if err != nil {
+		return fmterr.Errorf(errCreationClient, err)
+	}
+
+	// extend cluster
+	if d.HasChange("number_of_node") {
+		oldValue, newValue := d.GetChange("number_of_node")
+		// The resize API expects the number of nodes to add, not the target size.
+		num := newValue.(int) - oldValue.(int)
+		err = cluster.ResizeCluster(client, cluster.ResizeClusterOpts{
+			ClusterId: d.Id(),
+			Count:     num,
+		})
+		if err != nil {
+			return fmterr.Errorf("error extending DWS cluster (%s): %w", d.Id(), err)
+		}
+		stateConf := &resource.StateChangeConf{
+			Pending:      []string{"PENDING"},
+			Target:       []string{"DONE"},
+			Refresh:      dwsClusterV1StateRefreshFuncUpdate(client, d.Id(), true),
+			Timeout:      d.Timeout(schema.TimeoutUpdate),
+			Delay:        10 * time.Second,
+			PollInterval: 20 * time.Second,
+		}
+
+		_, err = stateConf.WaitForStateContext(ctx)
+		if err != nil {
+			return fmterr.Errorf("error waiting for cluster (%s) to update: %w", d.Id(), err)
+		}
+	}
+
+	// change pwd
+	if d.HasChange("user_pwd") {
+		newValue := d.Get("user_pwd")
+
+		err = cluster.ResetPassword(client, cluster.ResetPasswordOpts{
+			ClusterId:   d.Id(),
+			NewPassword: newValue.(string),
+		})
+		if err != nil {
+			return fmterr.Errorf("error resetting password of DWS cluster (%s): %w", d.Id(), err)
+		}
+
+		stateConf := &resource.StateChangeConf{
+			Pending:      []string{"PENDING"},
+			Target:       []string{"DONE"},
+			Refresh:      dwsClusterV1StateRefreshFuncUpdate(client, d.Id(), false),
+			Timeout:      d.Timeout(schema.TimeoutUpdate),
+			Delay:        10 * time.Second,
+			PollInterval: 20 * time.Second,
+		}
+
+		_, err = stateConf.WaitForStateContext(ctx)
+		if err != nil {
+			return fmterr.Errorf("error waiting for cluster (%s) to update: %w", d.Id(), err)
+		}
+	}
+
+	return resourceDwsClusterV1Read(ctx, d, meta)
+}
+
+func resourceDwsClusterV1Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	config := meta.(*cfg.Config)
+	client, err := config.DwsV1Client(config.GetRegion(d))
+	if err != nil {
+		return fmterr.Errorf(errCreationClient, err)
+	}
+
+	_, err = cluster.ListClusterDetails(client, d.Id())
+	if err != nil {
+		return common.CheckDeletedDiag(d, err, "DWS instance")
+	}
+	keepSnapshots := d.Get("keep_last_manual_snapshot").(int)
+	err = cluster.DeleteCluster(client, cluster.DeleteClusterOpts{
+		ClusterId:              d.Id(),
+		KeepLastManualSnapshot: &keepSnapshots,
+	})
+	if err != nil {
+		return fmterr.Errorf("error deleting DWS instance: %w", err)
+	}
+
+	log.Printf("[DEBUG] Waiting for cluster (%s) to delete", d.Id())
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"AVAILABLE"},
+		Target:     []string{"DELETED"},
+		Refresh:    dwsClusterV1StateRefreshFunc(client, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForStateContext(ctx)
+	if err != nil {
+		return fmterr.Errorf("error waiting for cluster (%s) to delete: %w", d.Id(), err)
+	}
+
+	log.Printf("[DEBUG] DWS instance %s deleted.", d.Id())
+	d.SetId("")
+	return nil
+}
+
+func dwsClusterV1StateRefreshFunc(client *golangsdk.ServiceClient, clusterID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		v, err := cluster.ListClusterDetails(client, clusterID)
+		if err != nil {
+			if _, ok := err.(golangsdk.ErrDefault404); ok {
+				return v, "DELETED", nil
+			}
+			return nil, "", err
+		}
+
+		return v, v.Status, nil
+	}
+}
+
+func dwsClusterV1StateRefreshFuncUpdate(client *golangsdk.ServiceClient, clusterID string, isExtendTask bool) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		resp, err := cluster.ListClusterDetails(client, clusterID)
+		if err != nil {
+			return nil, "FAILED", err
+		}
+		if resp.FailedReasons.ErrorMsg != "" && resp.FailedReasons.ErrorCode != "" {
+			return nil, "FAILED", fmt.Errorf("error_code: %s, error_msg: %s", resp.FailedReasons.ErrorCode,
+				resp.FailedReasons.ErrorMsg)
+		}
+
+		cState, cErr := parseClusterStatus(resp, isExtendTask)
+		if cErr != nil {
+			return nil, "FAILED", cErr
+		}
+		if cState {
+			return resp, "DONE", nil
+		}
+		return resp, "PENDING", nil
+	}
+}
+
+// parseClusterStatus reports whether the cluster has settled. When extend is
+// true, a TaskStatus of RESIZE_FAILURE is returned as an error; otherwise the
+// function only checks that no task is running on the cluster.
+func parseClusterStatus(detail *cluster.ClusterDetail, extend bool) (bool, error) {
+	if len(detail.ActionProgress) > 0 {
+		return false, nil
+	}
+
+	if detail.Status != "AVAILABLE" {
+		return false, nil
+	}
+
+	if extend && detail.TaskStatus == "RESIZE_FAILURE" {
+		return false, fmt.Errorf("cluster task status is RESIZE_FAILURE")
+	}
+
+	if detail.TaskStatus != "" {
+		return false, nil
+	}
+
+	if detail.SubStatus != "NORMAL" {
+		return false, nil
+	}
+
+	return true, nil
+}
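Editorial note: the refresh functions above reduce cluster state to PENDING/DONE/FAILED via `parseClusterStatus`. To make that decision table concrete, here is a self-contained sketch of the same logic against a simplified stand-in struct (illustrative only; the real function operates on `cluster.ClusterDetail` from gophertelekomcloud, and the field types of the stand-in are assumptions):

```go
package main

import "fmt"

// detail is a simplified stand-in for the fields parseClusterStatus inspects.
type detail struct {
	ActionProgress map[string]string
	Status         string
	TaskStatus     string
	SubStatus      string
}

// settled mirrors parseClusterStatus: any in-flight action, a non-AVAILABLE
// status, a pending task, or an abnormal sub-status keeps the resource in
// PENDING; RESIZE_FAILURE during an extend operation is treated as fatal.
func settled(d detail, extend bool) (bool, error) {
	if len(d.ActionProgress) > 0 || d.Status != "AVAILABLE" {
		return false, nil
	}
	if extend && d.TaskStatus == "RESIZE_FAILURE" {
		return false, fmt.Errorf("cluster task status is RESIZE_FAILURE")
	}
	if d.TaskStatus != "" || d.SubStatus != "NORMAL" {
		return false, nil
	}
	return true, nil
}

func main() {
	// Healthy cluster with no running task: DONE.
	fmt.Println(settled(detail{Status: "AVAILABLE", SubStatus: "NORMAL"}, true))
	// Resize still in progress: PENDING.
	fmt.Println(settled(detail{Status: "AVAILABLE", SubStatus: "NORMAL", TaskStatus: "GROWING"}, true))
	// Failed resize during an extend: FAILED with an error.
	fmt.Println(settled(detail{Status: "AVAILABLE", SubStatus: "NORMAL", TaskStatus: "RESIZE_FAILURE"}, true))
}
```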
diff --git a/releasenotes/notes/dws-cluster-5bbc0e9d9d45f6f7.yaml b/releasenotes/notes/dws-cluster-5bbc0e9d9d45f6f7.yaml
new file mode 100644
index 000000000..010e2fefc
--- /dev/null
+++ b/releasenotes/notes/dws-cluster-5bbc0e9d9d45f6f7.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    **New Resource:** ``opentelekomcloud_dws_cluster_v1`` (`#2061 <https://github.com/opentelekomcloud/terraform-provider-opentelekomcloud/pull/2061>`_)