Add RDS Restore Option #2728

Merged · 15 commits · Apr 25, 2018
208 changes: 207 additions & 1 deletion aws/resource_aws_db_instance.go
@@ -220,6 +220,46 @@ func resourceAwsDbInstance() *schema.Resource {
},
},

"s3_import": {
Contributor:
We should add ForceNew: true to all child attributes of s3_import as there is no "update" available for it. 😄

Contributor (author):
Added this, though I could see an interesting debate long term: if it's used as a springboard to create a database, you should be able to remove the backup file after the database is created. So in theory, deleting the block shouldn't cause a rebuild/recreate. But I guess that can be done with an ignore_changes block instead...
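A minimal sketch of that alternative, assuming Terraform 0.11-era syntax; the resource name and elided arguments are hypothetical:

    resource "aws_db_instance" "example" {
      # ... other instance arguments ...

      s3_import {
        # ... import arguments ...
      }

      lifecycle {
        # Keep a later removal of s3_import (e.g. after deleting the
        # backup file) from forcing the instance to be recreated.
        ignore_changes = ["s3_import"]
      }
    }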

Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{
"snapshot_identifier",
"replicate_source_db",
},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bucket_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"bucket_prefix": {
Type: schema.TypeString,
Required: false,
Contributor:
Optional: true is defined, so Required: false is extraneous

Optional: true,
ForceNew: true,
},
"ingestion_role": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"source_engine": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"source_engine_version": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
},
},

"skip_final_snapshot": {
Type: schema.TypeBool,
Optional: true,
@@ -451,6 +491,173 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
if err != nil {
return fmt.Errorf("Error creating DB Instance: %s", err)
}
} else if v, ok := d.GetOk("s3_import"); ok {

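// allocated_storage, engine, username, and password are Optional in the
// schema because other create paths (snapshot restore, replicas) don't
// need them, so the S3 restore path has to validate them itself.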
if _, ok := d.GetOk("allocated_storage"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "allocated_storage": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("engine"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "engine": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("password"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "password": required field is not set`, d.Get("name").(string))
}
if _, ok := d.GetOk("username"); !ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is not set`, d.Get("name").(string))
}

s3Bucket := v.([]interface{})[0].(map[string]interface{})
opts := rds.RestoreDBInstanceFromS3Input{
AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))),
AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)),
DBName: aws.String(d.Get("name").(string)),
DBInstanceClass: aws.String(d.Get("instance_class").(string)),
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
Engine: aws.String(d.Get("engine").(string)),
EngineVersion: aws.String(d.Get("engine_version").(string)),
S3BucketName: aws.String(s3Bucket["bucket_name"].(string)),
S3Prefix: aws.String(s3Bucket["bucket_prefix"].(string)),
S3IngestionRoleArn: aws.String(s3Bucket["ingestion_role"].(string)),
MasterUsername: aws.String(d.Get("username").(string)),
MasterUserPassword: aws.String(d.Get("password").(string)),
PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
SourceEngine: aws.String(s3Bucket["source_engine"].(string)),
SourceEngineVersion: aws.String(s3Bucket["source_engine_version"].(string)),
Tags: tags,
}

if attr, ok := d.GetOk("multi_az"); ok {
opts.MultiAZ = aws.Bool(attr.(bool))
}

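// The RestoreDBInstanceFromS3 API accepts neither CharacterSetName nor
// Timezone, so configs that set them are rejected up front.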
if _, ok := d.GetOk("character_set_name"); ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "character_set_name" doesn't work with restores`, d.Get("name").(string))
}
if _, ok := d.GetOk("timezone"); ok {
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "timezone" doesn't work with restores`, d.Get("name").(string))
}

attr := d.Get("backup_retention_period")
opts.BackupRetentionPeriod = aws.Int64(int64(attr.(int)))

if attr, ok := d.GetOk("maintenance_window"); ok {
opts.PreferredMaintenanceWindow = aws.String(attr.(string))
}

if attr, ok := d.GetOk("backup_window"); ok {
opts.PreferredBackupWindow = aws.String(attr.(string))
}

if attr, ok := d.GetOk("license_model"); ok {
opts.LicenseModel = aws.String(attr.(string))
}
if attr, ok := d.GetOk("parameter_group_name"); ok {
opts.DBParameterGroupName = aws.String(attr.(string))
}

if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
var s []*string
for _, v := range attr.List() {
s = append(s, aws.String(v.(string)))
}
opts.VpcSecurityGroupIds = s
}

if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
var s []*string
for _, v := range attr.List() {
s = append(s, aws.String(v.(string)))
}
opts.DBSecurityGroups = s
}
if attr, ok := d.GetOk("storage_type"); ok {
opts.StorageType = aws.String(attr.(string))
}

if attr, ok := d.GetOk("db_subnet_group_name"); ok {
opts.DBSubnetGroupName = aws.String(attr.(string))
}

if attr, ok := d.GetOk("iops"); ok {
opts.Iops = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("port"); ok {
opts.Port = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("availability_zone"); ok {
opts.AvailabilityZone = aws.String(attr.(string))
}

if attr, ok := d.GetOk("monitoring_role_arn"); ok {
opts.MonitoringRoleArn = aws.String(attr.(string))
}

if attr, ok := d.GetOk("monitoring_interval"); ok {
opts.MonitoringInterval = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("option_group_name"); ok {
opts.OptionGroupName = aws.String(attr.(string))
}

if attr, ok := d.GetOk("kms_key_id"); ok {
opts.KmsKeyId = aws.String(attr.(string))
}

if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
opts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
}

log.Printf("[DEBUG] DB Instance S3 Restore configuration: %#v", opts)
var err error
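// These InvalidParameterValue errors are typically transient (the
// monitoring and S3 ingestion IAM roles may still be propagating),
// so retry them rather than failing immediately.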
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err = conn.RestoreDBInstanceFromS3(&opts)
if err != nil {
if isAWSErr(err, "InvalidParameterValue", "ENHANCED_MONITORING") {
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidParameterValue", "S3_SNAPSHOT_INGESTION") {
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidParameterValue", "S3 bucket cannot be found") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return fmt.Errorf("Error creating DB Instance: %s", err)
}

d.SetId(d.Get("identifier").(string))

log.Printf("[INFO] DB Instance ID: %s", d.Id())

log.Println("[INFO] Waiting for DB Instance to be available")

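// "storage-optimization" is also treated as ready: the instance is
// usable while RDS optimizes the underlying storage in the background.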
stateConf := &resource.StateChangeConf{
Pending: resourceAwsDbInstanceCreatePendingStates,
Target: []string{"available", "storage-optimization"},
Refresh: resourceAwsDbInstanceStateRefreshFunc(d.Id(), conn),
Timeout: d.Timeout(schema.TimeoutCreate),
MinTimeout: 10 * time.Second,
Delay: 30 * time.Second, // Wait 30 secs before starting
}

// Wait, catching any errors
_, err = stateConf.WaitForState()
if err != nil {
return err
}

return resourceAwsDbInstanceRead(d, meta)
} else if _, ok := d.GetOk("snapshot_identifier"); ok {
opts := rds.RestoreDBInstanceFromDBSnapshotInput{
DBInstanceClass: aws.String(d.Get("instance_class").(string)),
@@ -509,7 +716,6 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
if attr, ok := d.GetOk("port"); ok {
opts.Port = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("tde_credential_arn"); ok {
opts.TdeCredentialArn = aws.String(attr.(string))
}
153 changes: 153 additions & 0 deletions aws/resource_aws_db_instance_test.go
@@ -316,6 +316,27 @@ func TestAccAWSDBInstance_snapshot(t *testing.T) {
})
}

func TestAccAWSDBInstance_s3(t *testing.T) {
var snap rds.DBInstance
bucket := acctest.RandomWithPrefix("tf-acc-test")
uniqueId := acctest.RandomWithPrefix("tf-acc-s3-import-test")
bucketPrefix := acctest.RandString(5)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot,
Steps: []resource.TestStep{
{
Config: testAccSnapshotInstanceConfigWithS3Import(bucket, bucketPrefix, uniqueId),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.s3", &snap),
),
},
},
})
}

func TestAccAWSDBInstance_enhancedMonitoring(t *testing.T) {
var dbInstance rds.DBInstance
rName := acctest.RandString(5)
@@ -1046,6 +1067,138 @@ resource "aws_db_instance" "snapshot" {
}`, acctest.RandInt())
}

func testAccSnapshotInstanceConfigWithS3Import(bucketName string, bucketPrefix string, uniqueId string) string {
return fmt.Sprintf(`

resource "aws_s3_bucket" "xtrabackup" {
bucket = "%s"
}

resource "aws_s3_bucket_object" "xtrabackup_db" {
bucket = "${aws_s3_bucket.xtrabackup.id}"
key = "%s/mysql-5-6-xtrabackup.tar.gz"
source = "../files/mysql-5-6-xtrabackup.tar.gz"
etag = "${md5(file("../files/mysql-5-6-xtrabackup.tar.gz"))}"
}

resource "aws_iam_role" "rds_s3_access_role" {
name = "%s-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "rds.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}

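// s3:* keeps the test simple; a real ingestion role can likely be
// scoped down to s3:GetObject and s3:ListBucket on the backup bucket.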
resource "aws_iam_policy" "test" {
name = "%s-policy"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"${aws_s3_bucket.xtrabackup.arn}",
"${aws_s3_bucket.xtrabackup.arn}/*"
]
}
]
}
POLICY
}

resource "aws_iam_policy_attachment" "test-attach" {
name = "%s-policy-attachment"
roles = [
"${aws_iam_role.rds_s3_access_role.name}"
]

policy_arn = "${aws_iam_policy.test.arn}"
}

// Make sure EVERYTHING required is here...
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
tags {
Name = "terraform-testacc-db-instance-with-subnet-group"
}
}

resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
availability_zone = "us-west-2a"
vpc_id = "${aws_vpc.foo.id}"
tags {
Name = "tf-acc-db-instance-with-subnet-group-1"
}
}

resource "aws_subnet" "bar" {
cidr_block = "10.1.2.0/24"
availability_zone = "us-west-2b"
vpc_id = "${aws_vpc.foo.id}"
tags {
Name = "tf-acc-db-instance-with-subnet-group-2"
}
}

resource "aws_db_subnet_group" "foo" {
name = "%s-subnet-group"
subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
tags {
Name = "tf-dbsubnet-group-test"
}
}

resource "aws_db_instance" "s3" {
identifier = "%s-db"

allocated_storage = 5
engine = "mysql"
engine_version = "5.6"
auto_minor_version_upgrade = true
instance_class = "db.t2.small"
name = "baz"
password = "barbarbarbar"
publicly_accessible = false
username = "foo"
backup_retention_period = 0

parameter_group_name = "default.mysql5.6"
skip_final_snapshot = true
multi_az = false
db_subnet_group_name = "${aws_db_subnet_group.foo.id}"

s3_import {
source_engine = "mysql"
source_engine_version = "5.6"

bucket_name = "${aws_s3_bucket.xtrabackup.bucket}"
bucket_prefix = "%s"
ingestion_role = "${aws_iam_role.rds_s3_access_role.arn}"
}
}
`, bucketName, bucketPrefix, uniqueId, uniqueId, uniqueId, uniqueId, uniqueId, bucketPrefix)
}

func testAccSnapshotInstanceConfigWithSnapshot(rInt int) string {
return fmt.Sprintf(`
resource "aws_db_instance" "snapshot" {
Binary file added files/mysql-5-6-xtrabackup.tar.gz