redshift/cluster: Add multi_az configuration #35508

Merged: 4 commits, Jan 29, 2024
Changes from all commits
7 changes: 7 additions & 0 deletions .changelog/35508.txt
@@ -0,0 +1,7 @@
```release-note:enhancement
resource/aws_redshift_cluster: Add `multi_az` argument
```

```release-note:enhancement
data/aws_redshift_cluster: Add `multi_az` attribute
```
237 changes: 143 additions & 94 deletions internal/service/redshift/cluster.go

Large diffs are not rendered by default.
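The cluster.go diff is not rendered here, but the data source changes and the changelog imply two additions on the resource side: a `multi_az` schema attribute and a `clusterMultiAZStatus` helper that parses the API's status string. A rough sketch follows; the schema flags, field names, and error wording are assumptions, not the hidden diff.

```go
// Sketch of the unrendered cluster.go additions; details are assumed from the
// data source changes and the changelog entry, not copied from the merged code.
package redshift

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/redshift"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Presumed resource schema entry: Optional so users can set it, Computed
// because the API reports a value even when the configuration omits it.
var multiAZSchema = &schema.Schema{
	Type:     schema.TypeBool,
	Optional: true,
	Computed: true,
}

// clusterMultiAZStatus is referenced by the data source read function below;
// a plausible implementation maps the cluster's MultiAZ status string
// ("Enabled"/"Disabled") to a bool.
func clusterMultiAZStatus(rsc *redshift.Cluster) (bool, error) {
	switch status := aws.StringValue(rsc.MultiAZ); strings.ToLower(status) {
	case "enabled":
		return true, nil
	case "disabled", "":
		return false, nil
	default:
		return false, fmt.Errorf("unexpected MultiAZ status: %s", status)
	}
}
```

The data source read below then sets `multi_az` from this helper, mirroring the existing `availability_zone_relocation_enabled` handling.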

54 changes: 27 additions & 27 deletions internal/service/redshift/cluster_data_source.go
@@ -14,11 +14,11 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-aws/internal/conns"
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
"github.com/hashicorp/terraform-provider-aws/internal/flex"
tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
)

// @SDKDataSource("aws_redshift_cluster")
// @SDKDataSource("aws_redshift_cluster", name="Cluster")
func DataSourceCluster() *schema.Resource {
return &schema.Resource{
ReadWithoutTimeout: dataSourceClusterRead,
@@ -153,6 +153,10 @@ func DataSourceCluster() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
},
"multi_az": {
Type: schema.TypeBool,
Computed: true,
},
"node_type": {
Type: schema.TypeString,
Computed: true,
@@ -227,11 +231,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int
d.Set("aqua_configuration_status", rsc.AquaConfiguration.AquaConfigurationStatus)
}
d.Set("availability_zone", rsc.AvailabilityZone)
azr, err := clusterAvailabilityZoneRelocationStatus(rsc)
if err != nil {
return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", clusterID, err)
if v, err := clusterAvailabilityZoneRelocationStatus(rsc); err != nil {
return sdkdiag.AppendFromErr(diags, err)
} else {
d.Set("availability_zone_relocation_enabled", v)
}
d.Set("availability_zone_relocation_enabled", azr)
d.Set("cluster_identifier", rsc.ClusterIdentifier)
d.Set("cluster_namespace_arn", rsc.ClusterNamespaceArn)
if err := d.Set("cluster_nodes", flattenClusterNodes(rsc.ClusterNodes)); err != nil {
@@ -250,6 +254,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int
}
d.Set("cluster_version", rsc.ClusterVersion)
d.Set("database_name", rsc.DBName)
d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn)
if rsc.ElasticIpStatus != nil {
d.Set("elastic_ip", rsc.ElasticIpStatus.ElasticIp)
}
@@ -259,34 +264,29 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int
d.Set("port", rsc.Endpoint.Port)
}
d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting)

var iamRoles []string
for _, i := range rsc.IamRoles {
iamRoles = append(iamRoles, aws.StringValue(i.IamRoleArn))
}
d.Set("iam_roles", iamRoles)

d.Set("iam_roles", tfslices.ApplyToAll(rsc.IamRoles, func(v *redshift.ClusterIamRole) string {
return aws.StringValue(v.IamRoleArn)
}))
d.Set("kms_key_id", rsc.KmsKeyId)
d.Set("maintenance_track_name", rsc.MaintenanceTrackName)
d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod)
d.Set("master_username", rsc.MasterUsername)
if v, err := clusterMultiAZStatus(rsc); err != nil {
return sdkdiag.AppendFromErr(diags, err)
} else {
d.Set("multi_az", v)
}
d.Set("node_type", rsc.NodeType)
d.Set("number_of_nodes", rsc.NumberOfNodes)
d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow)
d.Set("publicly_accessible", rsc.PubliclyAccessible)
d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn)
d.Set("maintenance_track_name", rsc.MaintenanceTrackName)
d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod)

if err := d.Set("tags", KeyValueTags(ctx, rsc.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
return sdkdiag.AppendErrorf(diags, "setting tags: %s", err)
}

d.Set("vpc_id", rsc.VpcId)

var vpcg []string
for _, g := range rsc.VpcSecurityGroups {
vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId))
}
d.Set("vpc_security_group_ids", vpcg)
d.Set("vpc_security_group_ids", tfslices.ApplyToAll(rsc.VpcSecurityGroups, func(v *redshift.VpcSecurityGroupMembership) string {
return aws.StringValue(v.VpcSecurityGroupId)
}))

loggingStatus, err := conn.DescribeLoggingStatusWithContext(ctx, &redshift.DescribeLoggingStatusInput{
ClusterIdentifier: aws.String(clusterID),
@@ -297,11 +297,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int
}

if loggingStatus != nil && aws.BoolValue(loggingStatus.LoggingEnabled) {
d.Set("enable_logging", loggingStatus.LoggingEnabled)
d.Set("bucket_name", loggingStatus.BucketName)
d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix)
d.Set("log_exports", flex.FlattenStringSet(loggingStatus.LogExports))
d.Set("enable_logging", loggingStatus.LoggingEnabled)
d.Set("log_destination_type", loggingStatus.LogDestinationType)
d.Set("log_exports", aws.StringValueSlice(loggingStatus.LogExports))
d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix)
}

return diags
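The loop-to-`tfslices.ApplyToAll` rewrites in the read function above rely on a generic mapping helper from `internal/slices`; a minimal stand-in with the behavior the diff assumes (not necessarily the provider's exact implementation):

```go
// Minimal stand-in for tfslices.ApplyToAll as used in the data source read:
// apply f to every element of s and return the mapped slice.
package slices

func ApplyToAll[T, U any](s []T, f func(T) U) []U {
	out := make([]U, len(s))
	for i, v := range s {
		out[i] = f(v)
	}
	return out
}
```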
71 changes: 71 additions & 0 deletions internal/service/redshift/cluster_data_source_test.go
@@ -43,6 +43,7 @@ func TestAccRedshiftClusterDataSource_basic(t *testing.T) {
resource.TestCheckResourceAttrSet(dataSourceName, "encrypted"),
resource.TestCheckResourceAttrSet(dataSourceName, "endpoint"),
resource.TestCheckResourceAttrSet(dataSourceName, "master_username"),
resource.TestCheckResourceAttrSet(dataSourceName, "multi_az"),
resource.TestCheckResourceAttrSet(dataSourceName, "node_type"),
resource.TestCheckResourceAttrSet(dataSourceName, "number_of_nodes"),
resource.TestCheckResourceAttrSet(dataSourceName, "port"),
@@ -126,6 +127,27 @@ func TestAccRedshiftClusterDataSource_availabilityZoneRelocationEnabled(t *testi
})
}

func TestAccRedshiftClusterDataSource_multiAZEnabled(t *testing.T) {
ctx := acctest.Context(t)
dataSourceName := "data.aws_redshift_cluster.test"
resourceName := "aws_redshift_cluster.test"
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
Steps: []resource.TestStep{
{
Config: testAccClusterDataSourceConfig_multiAZEnabled(rName),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttrPair(dataSourceName, "multi_az", resourceName, "multi_az"),
),
},
},
})
}

func testAccClusterDataSourceConfig_basic(rName string) string {
return fmt.Sprintf(`
resource "aws_redshift_cluster" "test" {
@@ -261,3 +283,52 @@ data "aws_redshift_cluster" "test" {
}
`, rName)
}

func testAccClusterDataSourceConfig_multiAZEnabled(rName string) string {
return fmt.Sprintf(`
resource "aws_kms_key" "test" {
description = %[1]q

policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "kms-tf-1",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:*",
"Resource": "*"
}
]
}
POLICY
}

resource "aws_redshift_cluster" "test" {
cluster_identifier = %[1]q
database_name = "mydb"
master_username = "foo_test"
master_password = "Mustbe8characters"
node_type = "ra3.xlplus"
number_of_nodes = 2
cluster_type = "multi-node"
automated_snapshot_retention_period = 1
allow_version_upgrade = false
skip_final_snapshot = true
encrypted = true
kms_key_id = aws_kms_key.test.arn

publicly_accessible = false
availability_zone_relocation_enabled = false
multi_az = true
}

data "aws_redshift_cluster" "test" {
cluster_identifier = aws_redshift_cluster.test.cluster_identifier
}
`, rName)
}
89 changes: 89 additions & 0 deletions internal/service/redshift/cluster_test.go
@@ -47,6 +47,7 @@ func TestAccRedshiftCluster_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "aqua_configuration_status", "auto"),
resource.TestCheckResourceAttr(resourceName, "maintenance_track_name", "current"),
resource.TestCheckResourceAttr(resourceName, "manual_snapshot_retention_period", "-1"),
resource.TestCheckResourceAttr(resourceName, "multi_az", "false"),
resource.TestCheckResourceAttr(resourceName, "iam_roles.#", "0"),
resource.TestCheckResourceAttr(resourceName, "tags.#", "0"),
),
@@ -871,6 +872,47 @@ func TestAccRedshiftCluster_manageMasterPassword(t *testing.T) {
})
}

func TestAccRedshiftCluster_multiAZ(t *testing.T) {
ctx := acctest.Context(t)
var v redshift.Cluster
resourceName := "aws_redshift_cluster.test"
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acctest.PreCheck(ctx, t) },
ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID),
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
CheckDestroy: testAccCheckClusterDestroy(ctx),
Steps: []resource.TestStep{
{
Config: testAccClusterConfig_multiAZ(rName, true),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &v),
resource.TestCheckResourceAttr(resourceName, "multi_az", "true"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"final_snapshot_identifier",
"master_password",
"skip_final_snapshot",
"apply_immediately",
},
},
{
Config: testAccClusterConfig_multiAZ(rName, false),
Check: resource.ComposeTestCheckFunc(
testAccCheckClusterExists(ctx, resourceName, &v),
resource.TestCheckResourceAttr(resourceName, "multi_az", "false"),
),
},
},
})
}

func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc {
return func(s *terraform.State) error {
conn := acctest.Provider.Meta().(*conns.AWSClient).RedshiftConn(ctx)
@@ -1039,6 +1081,7 @@ resource "aws_redshift_cluster" "test" {
database_name = "mydb"
master_username = "foo_test"
master_password = "Mustbe8characters"
multi_az = false
node_type = "dc2.large"
automated_snapshot_retention_period = 0
allow_version_upgrade = false
@@ -1787,3 +1830,49 @@ resource "aws_redshift_cluster" "test" {
}
`, rName))
}

func testAccClusterConfig_multiAZ(rName string, enabled bool) string {
return acctest.ConfigCompose(
fmt.Sprintf(`
resource "aws_kms_key" "test" {
description = %[1]q

policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "kms-tf-1",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:*",
"Resource": "*"
}
]
}
POLICY
}

resource "aws_redshift_cluster" "test" {
cluster_identifier = %[1]q
database_name = "mydb"
master_username = "foo_test"
master_password = "Mustbe8characters"
node_type = "ra3.xlplus"
number_of_nodes = 2
cluster_type = "multi-node"
automated_snapshot_retention_period = 1
allow_version_upgrade = false
skip_final_snapshot = true
encrypted = true
kms_key_id = aws_kms_key.test.arn

publicly_accessible = false
availability_zone_relocation_enabled = false
multi_az = %[2]t
}
`, rName, enabled))
}
1 change: 1 addition & 0 deletions internal/service/redshift/service_package_gen.go

Some generated files are not rendered by default.
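The single generated-file addition is not shown either; given the annotation change to `@SDKDataSource("aws_redshift_cluster", name="Cluster")`, the regenerated registration presumably gains a `Name` field. An illustrative sketch, following the provider's usual generated shape rather than the actual file:

```go
// Presumed shape of the regenerated registration entry; only the Name line is
// new, matching the updated @SDKDataSource annotation. This is an illustration
// of the generated file, not its actual content.
package redshift

import (
	"context"

	"github.com/hashicorp/terraform-provider-aws/internal/types"
)

type servicePackage struct{}

func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource {
	return []*types.ServicePackageSDKDataSource{
		{
			Factory:  DataSourceCluster, // defined in cluster_data_source.go
			TypeName: "aws_redshift_cluster",
			Name:     "Cluster", // the one-line addition
		},
		// ...other Redshift data sources elided...
	}
}
```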

1 change: 1 addition & 0 deletions website/docs/d/redshift_cluster.html.markdown
@@ -76,6 +76,7 @@ This data source exports the following attributes in addition to the arguments a
* `iam_roles` - IAM roles associated to the cluster
* `kms_key_id` - KMS encryption key associated to the cluster
* `master_username` - Username for the master DB user
* `multi_az` - If the cluster is a Multi-AZ deployment
* `node_type` - Cluster node type
* `number_of_nodes` - Number of nodes in the cluster
* `maintenance_track_name` - The name of the maintenance track for the restored cluster.
1 change: 1 addition & 0 deletions website/docs/r/redshift_cluster.html.markdown
@@ -67,6 +67,7 @@ This resource supports the following arguments:
Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number.
* `master_password_secret_kms_key_id` - (Optional) ID of the KMS key used to encrypt the cluster admin credentials secret.
* `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user.
* `multi_az` - (Optional) Specifies if the Redshift cluster is multi-AZ.
* `vpc_security_group_ids` - (Optional) A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.
* `cluster_subnet_group_name` - (Optional) The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC).
* `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if `availability_zone_relocation_enabled` is `true`.