diff --git a/.changelog/43700.txt b/.changelog/43700.txt new file mode 100644 index 000000000000..55fe35d8e024 --- /dev/null +++ b/.changelog/43700.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_ec2_stop_instance +``` \ No newline at end of file diff --git a/.changelog/43790.txt b/.changelog/43790.txt new file mode 100644 index 000000000000..0c9b5cda41c5 --- /dev/null +++ b/.changelog/43790.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_odb_cloud_vm_cluster +``` + +```release-note:new-data-source +aws_odb_cloud_vm_cluster +``` \ No newline at end of file diff --git a/.changelog/43955.txt b/.changelog/43955.txt new file mode 100644 index 000000000000..8e94f5403e01 --- /dev/null +++ b/.changelog/43955.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_cloudfront_create_invalidation +``` \ No newline at end of file diff --git a/.changelog/43960.txt b/.changelog/43960.txt new file mode 100644 index 000000000000..ffb8546178a7 --- /dev/null +++ b/.changelog/43960.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_batch_job_queue: Adds List support +``` diff --git a/.changelog/43972.txt b/.changelog/43972.txt new file mode 100644 index 000000000000..c63e8a43cc9d --- /dev/null +++ b/.changelog/43972.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_lambda_invoke +``` \ No newline at end of file diff --git a/.changelog/44129.txt b/.changelog/44129.txt new file mode 100644 index 000000000000..91ef87bad0dc --- /dev/null +++ b/.changelog/44129.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_instance: Adds List support +``` + +```release-note:enhancement +resource/aws_iam_role: Adds List support +``` + +```release-note:enhancement +resource/aws_cloudwatch_log_group: Adds List support +``` diff --git a/.changelog/44214.txt b/.changelog/44214.txt new file mode 100644 index 000000000000..c89ba2066647 --- /dev/null +++ b/.changelog/44214.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_ses_send_email +``` \ No newline at end of 
file diff --git a/.changelog/44232.txt b/.changelog/44232.txt new file mode 100644 index 000000000000..e00988ef1c85 --- /dev/null +++ b/.changelog/44232.txt @@ -0,0 +1,3 @@ +```release-note:new-action +aws_sns_publish +``` \ No newline at end of file diff --git a/.changelog/44286.txt b/.changelog/44286.txt new file mode 100644 index 000000000000..4752571ab871 --- /dev/null +++ b/.changelog/44286.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sfn_state_machine: Add resource identity support +``` \ No newline at end of file diff --git a/.changelog/44289.txt b/.changelog/44289.txt new file mode 100644 index 000000000000..4f44752b1eaf --- /dev/null +++ b/.changelog/44289.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ecs_service: Add `deployment_configuration.lifecycle_hook.hook_details` argument +``` diff --git a/.ci/tools/go.mod b/.ci/tools/go.mod index e090e6e1847c..09cc80a628ea 100644 --- a/.ci/tools/go.mod +++ b/.ci/tools/go.mod @@ -28,7 +28,7 @@ require ( cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.53.0 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect - dario.cat/mergo v1.0.1 // indirect + dario.cat/mergo v1.0.2 // indirect dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect dev.gaijin.team/go/golib v0.6.0 // indirect github.com/4meepo/tagalign v1.4.3 // indirect @@ -44,7 +44,7 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect @@ -313,7 +313,7 @@ require ( github.com/sourcegraph/go-lsp 
v0.0.0-20200429204803-219e11d77f5d // indirect github.com/sourcegraph/jsonrpc2 v0.2.1 // indirect github.com/spf13/afero v1.14.0 // indirect - github.com/spf13/cast v1.8.0 // indirect + github.com/spf13/cast v1.10.0 // indirect github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.7 // indirect github.com/spf13/viper v1.20.1 // indirect @@ -351,7 +351,7 @@ require ( github.com/ykadowak/zerologlint v0.1.5 // indirect github.com/yuin/goldmark v1.7.13 // indirect github.com/yuin/goldmark-meta v1.1.0 // indirect - github.com/zclconf/go-cty v1.16.4 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect github.com/zclconf/go-cty-yaml v1.1.0 // indirect github.com/zeebo/errs v1.4.0 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect diff --git a/.ci/tools/go.sum b/.ci/tools/go.sum index 99690185e0e2..ec878219c42f 100644 --- a/.ci/tools/go.sum +++ b/.ci/tools/go.sum @@ -622,8 +622,8 @@ cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6MfY= codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= -dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= -dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= dev.gaijin.team/go/exhaustruct/v4 v4.0.0 h1:873r7aNneqoBB3IaFIzhvt2RFYTuHgmMjoKfwODoI1Y= dev.gaijin.team/go/exhaustruct/v4 v4.0.0/go.mod h1:aZ/k2o4Y05aMJtiux15x8iXaumE88YdiB0Ai4fXOzPI= dev.gaijin.team/go/golib v0.6.0 h1:v6nnznFTs4bppib/NyU1PQxobwDHwCXXl15P7DV5Zgo= @@ -681,8 +681,8 @@ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6 github.com/Masterminds/goutils v1.1.1 
h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= @@ -1826,8 +1826,8 @@ github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcD github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -1955,8 +1955,8 @@ github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBv github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= 
github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.16.4 h1:QGXaag7/7dCzb+odlGrgr+YmYZFaOCMW6DEpS+UD1eE= -github.com/zclconf/go-cty v1.16.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= diff --git a/CHANGELOG.md b/CHANGELOG.md index 452d0a10eae6..a8ef7c269ad7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,20 +1,32 @@ -## 6.14.0 (Unreleased) +## 6.15.0 (Unreleased) + +## 6.14.0 (September 18, 2025) FEATURES: * **New Data Source:** `aws_billing_views` ([#44272](https://github.com/hashicorp/terraform-provider-aws/issues/44272)) * **New Data Source:** `aws_odb_cloud_autonomous_vm_cluster` ([#43809](https://github.com/hashicorp/terraform-provider-aws/issues/43809)) * **New Data Source:** `aws_odb_cloud_exadata_infrastructure` ([#43650](https://github.com/hashicorp/terraform-provider-aws/issues/43650)) +* **New Data Source:** `aws_odb_cloud_vm_cluster` ([#43790](https://github.com/hashicorp/terraform-provider-aws/issues/43790)) * **New Data Source:** `aws_odb_network` ([#43715](https://github.com/hashicorp/terraform-provider-aws/issues/43715)) +* **New Data Source:** `aws_odb_network_peering_connection` ([#43757](https://github.com/hashicorp/terraform-provider-aws/issues/43757)) * **New Resource:** `aws_controltower_baseline` ([#42397](https://github.com/hashicorp/terraform-provider-aws/issues/42397)) * **New 
Resource:** `aws_odb_cloud_autonomous_vm_cluster` ([#43809](https://github.com/hashicorp/terraform-provider-aws/issues/43809)) * **New Resource:** `aws_odb_cloud_exadata_infrastructure` ([#43650](https://github.com/hashicorp/terraform-provider-aws/issues/43650)) +* **New Resource:** `aws_odb_cloud_vm_cluster` ([#43790](https://github.com/hashicorp/terraform-provider-aws/issues/43790)) * **New Resource:** `aws_odb_network` ([#43715](https://github.com/hashicorp/terraform-provider-aws/issues/43715)) +* **New Resource:** `aws_odb_network_peering_connection` ([#43757](https://github.com/hashicorp/terraform-provider-aws/issues/43757)) ENHANCEMENTS: +* resource/aws_batch_job_queue: Adds List support ([#43960](https://github.com/hashicorp/terraform-provider-aws/issues/43960)) +* resource/aws_cloudwatch_log_group: Adds List support ([#44129](https://github.com/hashicorp/terraform-provider-aws/issues/44129)) +* resource/aws_ecs_service: Add `deployment_configuration.lifecycle_hook.hook_details` argument ([#44289](https://github.com/hashicorp/terraform-provider-aws/issues/44289)) +* resource/aws_iam_role: Adds List support ([#44129](https://github.com/hashicorp/terraform-provider-aws/issues/44129)) +* resource/aws_instance: Adds List support ([#44129](https://github.com/hashicorp/terraform-provider-aws/issues/44129)) * resource/aws_rds_global_cluster: Remove provider-side conflict between `source_db_cluster_identifier` and `engine` arguments ([#44252](https://github.com/hashicorp/terraform-provider-aws/issues/44252)) * resource/aws_scheduler_schedule: Add `action_after_completion` argument ([#44264](https://github.com/hashicorp/terraform-provider-aws/issues/44264)) +* resource/aws_sfn_state_machine: Add resource identity support ([#44286](https://github.com/hashicorp/terraform-provider-aws/issues/44286)) BUG FIXES: diff --git a/GNUmakefile b/GNUmakefile index ae5c3f39eac2..6f1a93f77ed0 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -897,7 +897,6 @@ website-tflint: tflint-init 
## [CI] Website Checks / tflint "--disable-rule=aws_s3_object_copy_invalid_source" \ "--disable-rule=aws_servicecatalog_portfolio_share_invalid_type" \ "--disable-rule=aws_transfer_ssh_key_invalid_body" \ - "--disable-rule=aws_worklink_website_certificate_authority_association_invalid_certificate" \ "--disable-rule=terraform_unused_declarations" \ "--disable-rule=terraform_typed_variables" \ ) ; \ diff --git a/examples/odb/vm_cluster.tf b/examples/odb/vm_cluster.tf new file mode 100644 index 000000000000..55837e8f14e7 --- /dev/null +++ b/examples/odb/vm_cluster.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +resource "aws_odb_cloud_vm_cluster" "with_minimum_parameter" { + display_name = "my-exa-infra" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["public-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["db-server-1", "db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } +} + + +resource "aws_odb_cloud_vm_cluster" "with_all_parameters" { + display_name = "my-vmc" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["my-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["my-dbserver-1", "my-db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + 
tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index 35817c817c63..6a6d69cdb6a9 100644 --- a/go.mod +++ b/go.mod @@ -289,7 +289,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.7.0 - github.com/hashicorp/hcl/v2 v2.23.0 + github.com/hashicorp/hcl/v2 v2.24.0 github.com/hashicorp/terraform-json v0.27.2 github.com/hashicorp/terraform-plugin-framework v1.16.0 github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 @@ -299,8 +299,8 @@ require ( github.com/hashicorp/terraform-plugin-go v0.29.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-mux v0.21.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 - github.com/hashicorp/terraform-plugin-testing v1.13.3 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0 + github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1 github.com/jaswdr/faker/v2 v2.8.0 github.com/jmespath/go-jmespath v0.4.0 github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38 @@ -349,7 +349,7 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/hc-install v0.9.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.23.0 // indirect + github.com/hashicorp/terraform-exec v0.24.0 // indirect github.com/hashicorp/terraform-registry-address v0.4.0 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.2 // indirect @@ -368,7 +368,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zclconf/go-cty v1.16.4 // indirect + 
github.com/zclconf/go-cty v1.17.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect diff --git a/go.sum b/go.sum index a463a0e19a4a..f290a0d72fe4 100644 --- a/go.sum +++ b/go.sum @@ -661,12 +661,12 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= +github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE= +github.com/hashicorp/terraform-exec v0.24.0/go.mod h1:lluc/rDYfAhYdslLJQg3J0oDqo88oGQAdHR+wDqFvo4= github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= github.com/hashicorp/terraform-plugin-framework v1.16.0 h1:tP0f+yJg0Z672e7levixDe5EpWwrTrNryPM9kDMYIpE= @@ -683,10 +683,10 @@ github.com/hashicorp/terraform-plugin-go v0.29.0 
h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+ github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= github.com/hashicorp/terraform-plugin-mux v0.21.0 h1:QsEYnzSD2c3zT8zUrUGqaFGhV/Z8zRUlU7FY3ZPJFfw= github.com/hashicorp/terraform-plugin-mux v0.21.0/go.mod h1:Qpt8+6AD7NmL0DS7ASkN0EXpDQ2J/FnnIgeUr1tzr5A= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-plugin-testing v1.13.3 h1:QLi/khB8Z0a5L54AfPrHukFpnwsGL8cwwswj4RZduCo= -github.com/hashicorp/terraform-plugin-testing v1.13.3/go.mod h1:WHQ9FDdiLoneey2/QHpGM/6SAYf4A7AZazVg7230pLE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0 h1:PQP7Crrc7t/ozj+P9x0/lsTzGNy3lVppH8zAJylofaE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0/go.mod h1:GQhpKVvvuwzD79e8/NZ+xzj+ZpWovdPAe8nfV/skwNU= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1 h1:caWmY2Fv/KgDAXU7IVjcBDfIdmr/n6VRYhCLxNmlaXs= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1/go.mod h1:jVm3pD9uQAT0X2RSEdcqjju2bCGv5f73DGZFU4v7EAU= github.com/hashicorp/terraform-registry-address v0.4.0 h1:S1yCGomj30Sao4l5BMPjTGZmCNzuv7/GDTDX99E9gTk= github.com/hashicorp/terraform-registry-address v0.4.0/go.mod h1:LRS1Ay0+mAiRkUyltGT+UHWkIqTFvigGn/LbMshfflE= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -755,8 +755,8 @@ github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXq github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -790,8 +790,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.16.4 h1:QGXaag7/7dCzb+odlGrgr+YmYZFaOCMW6DEpS+UD1eE= -github.com/zclconf/go-cty v1.16.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= diff --git a/internal/acctest/knownvalue/global_arn_exact.go b/internal/acctest/knownvalue/global_arn_exact.go new file mode 100644 index 000000000000..2030dd5538b9 --- /dev/null +++ b/internal/acctest/knownvalue/global_arn_exact.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package statecheck + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +var _ knownvalue.Check = globalARNExact{} + +type globalARNExact struct { + service string + resource string +} + +// CheckValue determines whether the passed value is of type string, and +// contains a matching sequence of bytes. +func (v globalARNExact) CheckValue(other any) error { + otherVal, ok := other.(string) + + if !ok { + return fmt.Errorf("expected string value for GlobalARNExact check, got: %T", other) + } + + if otherVal != v.buildARNString() { + return fmt.Errorf("expected value %s for GlobalARNExact check, got: %s", v.buildARNString(), otherVal) + } + + return nil +} + +// String returns the string representation of the value. +func (v globalARNExact) String() string { + return v.buildARNString() +} + +func (v globalARNExact) buildARNString() string { + return arn.ARN{ + AccountID: acctest.AccountID(context.Background()), + Partition: acctest.Partition(), + Region: "", + Service: v.service, + Resource: v.resource, + }.String() +} + +func GlobalARNExact(service, resource string) knownvalue.Check { + return globalARNExact{ + service: service, + resource: resource, + } +} diff --git a/internal/conns/conns.go b/internal/conns/conns.go index e59ca4c0feae..66256f099b82 100644 --- a/internal/conns/conns.go +++ b/internal/conns/conns.go @@ -5,6 +5,7 @@ package conns import ( "context" + "iter" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/vcr" @@ -34,6 +35,16 @@ type ServicePackageWithEphemeralResources interface { EphemeralResources(context.Context) []*types.ServicePackageEphemeralResource } +type ServicePackageWithFrameworkListResources interface { + ServicePackage + FrameworkListResources(context.Context) 
iter.Seq[*types.ServicePackageFrameworkListResource] +} + +type ServicePackageWithSDKListResources interface { + ServicePackage + SDKListResources(ctx context.Context) iter.Seq[*types.ServicePackageSDKListResource] +} + type ( contextKeyType int ) diff --git a/internal/errs/fwdiag/diags.go b/internal/errs/fwdiag/diags.go index 7a09be2c7d73..c442e30c79cf 100644 --- a/internal/errs/fwdiag/diags.go +++ b/internal/errs/fwdiag/diags.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" ) // DiagnosticsError returns an error containing all Diagnostic with SeverityError @@ -59,6 +60,17 @@ func NewResourceNotFoundWarningDiagnostic(err error) diag.Diagnostic { ) } +func NewListResultErrorDiagnostic(err error) list.ListResult { + return list.ListResult{ + Diagnostics: diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + err.Error(), + ), + }, + } +} + func AsError[T any](x T, diags diag.Diagnostics) (T, error) { return x, DiagnosticsError(diags) } diff --git a/internal/framework/list_resource_with_sdkv2_resource.go b/internal/framework/list_resource_with_sdkv2_resource.go new file mode 100644 index 000000000000..c3d84e4b100f --- /dev/null +++ b/internal/framework/list_resource_with_sdkv2_resource.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "unique" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + tfunique "github.com/hashicorp/terraform-provider-aws/internal/unique" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type WithRegionSpec interface { + SetRegionSpec(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion]) +} + +type ListResourceWithSDKv2Resource struct { + resourceSchema *schema.Resource + identitySpec inttypes.Identity + identitySchema *schema.ResourceIdentity + regionSpec unique.Handle[inttypes.ServicePackageResourceRegion] +} + +func (l *ListResourceWithSDKv2Resource) SetRegionSpec(regionSpec unique.Handle[inttypes.ServicePackageResourceRegion]) { + l.regionSpec = regionSpec + + var isRegionOverrideEnabled bool + if !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled { + if _, ok := l.resourceSchema.SchemaMap()[names.AttrRegion]; !ok { + // TODO: Use standard shared `region` attribute + l.resourceSchema.SchemaMap()[names.AttrRegion] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + } + } + } +} + +func (l *ListResourceWithSDKv2Resource) SetIdentitySpec(identitySpec inttypes.Identity) { + out := make(map[string]*schema.Schema) + for _, v := range identitySpec.Attributes { + out[v.Name()] = &schema.Schema{ + Type: schema.TypeString, + } + if v.Required() { + out[v.Name()].Required = true + } else { + out[v.Name()].Optional = true + } + } + + identitySchema := schema.ResourceIdentity{ + SchemaFunc: func() map[string]*schema.Schema { + return out 
+ }, + } + + l.identitySchema = &identitySchema + l.resourceSchema.Identity = &identitySchema + l.identitySpec = identitySpec +} + +func (l *ListResourceWithSDKv2Resource) RawV5Schemas(ctx context.Context, _ list.RawV5SchemaRequest, response *list.RawV5SchemaResponse) { + response.ProtoV5Schema = l.resourceSchema.ProtoSchema(ctx)() + response.ProtoV5IdentitySchema = l.resourceSchema.ProtoIdentitySchema(ctx)() +} + +func (l *ListResourceWithSDKv2Resource) SetResourceSchema(resource *schema.Resource) { + l.resourceSchema = resource +} + +func (l *ListResourceWithSDKv2Resource) ResourceData() *schema.ResourceData { + return l.resourceSchema.Data(&terraform.InstanceState{}) +} + +func (l *ListResourceWithSDKv2Resource) setResourceIdentity(ctx context.Context, client *conns.AWSClient, d *schema.ResourceData) error { + identity, err := d.Identity() + if err != nil { + return err + } + + for _, attr := range l.identitySpec.Attributes { + switch attr.Name() { + case names.AttrAccountID: + if err := identity.Set(attr.Name(), client.AccountID(ctx)); err != nil { + return err + } + + case names.AttrRegion: + if err := identity.Set(attr.Name(), client.Region(ctx)); err != nil { + return err + } + + default: + val, ok := getAttributeOk(d, attr.ResourceAttributeName()) + if !ok { + continue + } + if err := identity.Set(attr.Name(), val); err != nil { + return err + } + } + } + + return nil +} + +type resourceData interface { + Id() string + GetOk(string) (any, bool) +} + +func getAttributeOk(d resourceData, name string) (string, bool) { + if name == "id" { + return d.Id(), true + } + if v, ok := d.GetOk(name); !ok { + return "", false + } else { + return v.(string), true + } +} + +func (l *ListResourceWithSDKv2Resource) SetResult(ctx context.Context, awsClient *conns.AWSClient, includeResource bool, result *list.ListResult, rd *schema.ResourceData) { + err := l.setResourceIdentity(ctx, awsClient, rd) + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error 
Listing Remote Resources", + "An unexpected error occurred setting resource identity. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + tfTypeIdentity, err := rd.TfTypeIdentityState() + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred converting identity state. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + result.Diagnostics.Append(result.Identity.Set(ctx, *tfTypeIdentity)...) + if result.Diagnostics.HasError() { + return + } + + if includeResource { + if !tfunique.IsHandleNil(l.regionSpec) && l.regionSpec.Value().IsOverrideEnabled { + if err := rd.Set(names.AttrRegion, awsClient.Region(ctx)); err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + } + + tfTypeResource, err := rd.TfTypeResourceState() + if err != nil { + result.Diagnostics.Append(diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + "An unexpected error occurred converting resource state. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + "Error: "+err.Error(), + )) + return + } + + result.Diagnostics.Append(result.Resource.Set(ctx, *tfTypeResource)...) 
+ if result.Diagnostics.HasError() { + return + } + } +} diff --git a/internal/framework/list_resource_with_sdkv2_tags.go b/internal/framework/list_resource_with_sdkv2_tags.go new file mode 100644 index 000000000000..03f017a0c4b8 --- /dev/null +++ b/internal/framework/list_resource_with_sdkv2_tags.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "unique" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/provider/interceptors" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type ListResourceWithSDKv2Tags struct { + tagSpec interceptors.HTags +} + +func (r *ListResourceWithSDKv2Tags) SetTagsSpec(tags unique.Handle[inttypes.ServicePackageResourceTags]) { + r.tagSpec = interceptors.HTags(tags) +} + +func (r *ListResourceWithSDKv2Tags) SetTags(ctx context.Context, client *conns.AWSClient, d *schema.ResourceData) error { + sp, _, _, tagsInContext, ok := interceptors.InfoFromContext(ctx, client) + if !ok { + return nil + } + + // If the R handler didn't set tags, try and read them from the service API. + if tagsInContext.TagsOut.IsNone() { + // Some old resources may not have the required attribute set after Read: + // https://github.com/hashicorp/terraform-provider-aws/issues/31180 + if identifier := r.tagSpec.GetIdentifierSDKv2(ctx, d); identifier != "" { + if err := r.tagSpec.ListTags(ctx, sp, client, identifier); err != nil { + return err + } + } + } + + // Remove any provider configured ignore_tags and system tags from those returned from the service API. 
+ tags := tagsInContext.TagsOut.UnwrapOrDefault().IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(client.IgnoreTagsConfig(ctx)) + + // The resource's configured tags can now include duplicate tags that have been configured on the provider. + if err := d.Set(names.AttrTags, tags.ResolveDuplicates(ctx, client.DefaultTagsConfig(ctx), client.IgnoreTagsConfig(ctx), d, names.AttrTags, nil).Map()); err != nil { + return err + } + + // Computed tags_all do. + if err := d.Set(names.AttrTagsAll, tags.Map()); err != nil { + return err + } + + // reset tags in context for next resource + tagsInContext.TagsOut = nil + + return nil +} diff --git a/internal/framework/with_identity.go b/internal/framework/with_identity.go new file mode 100644 index 000000000000..a3b06bebd2bd --- /dev/null +++ b/internal/framework/with_identity.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" +) + +type Identityer interface { + SetIdentitySpec(identity inttypes.Identity) +} + +var _ Identityer = &WithIdentity{} + +type WithIdentity struct { + identity inttypes.Identity +} + +func (w *WithIdentity) SetIdentitySpec(identity inttypes.Identity) { + w.identity = identity +} + +func (w WithIdentity) IdentitySpec() inttypes.Identity { + return w.identity +} diff --git a/internal/framework/with_import_by_identity.go b/internal/framework/with_import_by_identity.go index 3f34178ff8b0..987e2d2bd6f3 100644 --- a/internal/framework/with_import_by_identity.go +++ b/internal/framework/with_import_by_identity.go @@ -13,7 +13,8 @@ import ( // TODO: Needs a better name type ImportByIdentityer interface { - SetIdentitySpec(identity inttypes.Identity, importSpec inttypes.FrameworkImport) + Identityer + SetImportSpec(importSpec inttypes.FrameworkImport) } var _ ImportByIdentityer = &WithImportByIdentity{} @@ -22,12 +23,11 @@ var _ ImportByIdentityer = 
&WithImportByIdentity{} // // See: https://developer.hashicorp.com/terraform/plugin/framework/resources/identity#importing-by-identity type WithImportByIdentity struct { - identity inttypes.Identity + WithIdentity importSpec inttypes.FrameworkImport } -func (w *WithImportByIdentity) SetIdentitySpec(identity inttypes.Identity, importSpec inttypes.FrameworkImport) { - w.identity = identity +func (w *WithImportByIdentity) SetImportSpec(importSpec inttypes.FrameworkImport) { w.importSpec = importSpec } @@ -55,10 +55,6 @@ func (w WithImportByIdentity) ImportState(ctx context.Context, request resource. } } -func (w WithImportByIdentity) IdentitySpec() inttypes.Identity { - return w.identity -} - func (w WithImportByIdentity) ImportSpec() inttypes.FrameworkImport { return w.importSpec } diff --git a/internal/framework/with_list.go b/internal/framework/with_list.go new file mode 100644 index 000000000000..1441b0d5be48 --- /dev/null +++ b/internal/framework/with_list.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" +) + +type Lister interface { + AppendResultInterceptor(listresource.ListResultInterceptor) +} + +var _ Lister = &WithList{} + +type WithList struct { + interceptors []listresource.ListResultInterceptor +} + +func (w *WithList) AppendResultInterceptor(interceptor listresource.ListResultInterceptor) { + w.interceptors = append(w.interceptors, interceptor) +} + +func (w WithList) ResultInterceptors() []listresource.ListResultInterceptor { + return w.interceptors +} diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index 8c7b965a3f7c..993a827d920f 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:build generate -// +build generate +//go:build ignore +// +build ignore package main @@ -62,12 +62,14 @@ func main() { v := &visitor{ g: g, - actions: make(map[string]ResourceDatum, 0), - ephemeralResources: make(map[string]ResourceDatum, 0), - frameworkDataSources: make(map[string]ResourceDatum, 0), - frameworkResources: make(map[string]ResourceDatum, 0), - sdkDataSources: make(map[string]ResourceDatum, 0), - sdkResources: make(map[string]ResourceDatum, 0), + actions: make(map[string]ResourceDatum, 0), + ephemeralResources: make(map[string]ResourceDatum, 0), + frameworkDataSources: make(map[string]ResourceDatum, 0), + frameworkListResources: make(map[string]ResourceDatum, 0), + frameworkResources: make(map[string]ResourceDatum, 0), + sdkDataSources: make(map[string]ResourceDatum, 0), + sdkResources: make(map[string]ResourceDatum, 0), + sdkListResources: make(map[string]ResourceDatum, 0), } v.processDir(".") @@ -88,6 +90,36 @@ func main() { } } + for key, value := range v.frameworkListResources { + if val, exists := v.frameworkResources[key]; exists { + value.Name = val.Name + value.IdentityAttributes = val.IdentityAttributes + value.IdentityDuplicateAttrs = val.IdentityDuplicateAttrs + value.ARNIdentity = val.ARNIdentity + value.SingletonIdentity = val.SingletonIdentity + value.TransparentTagging = val.TransparentTagging + value.TagsResourceType = val.TagsResourceType + value.TagsIdentifierAttribute = val.TagsIdentifierAttribute + + v.frameworkListResources[key] = value + } + } + + for key, value := range v.sdkListResources { + if val, exists := v.sdkResources[key]; exists { + value.Name = val.Name + value.IdentityAttributes = val.IdentityAttributes + value.IdentityDuplicateAttrs = val.IdentityDuplicateAttrs + value.ARNIdentity = val.ARNIdentity + value.SingletonIdentity = val.SingletonIdentity + value.TransparentTagging = val.TransparentTagging + value.TagsResourceType = val.TagsResourceType + 
value.TagsIdentifierAttribute = val.TagsIdentifierAttribute + + v.sdkListResources[key] = value + } + } + s := ServiceDatum{ GenerateClient: l.GenerateClient(), IsGlobal: l.IsGlobal(), @@ -98,28 +130,36 @@ func main() { Actions: v.actions, EphemeralResources: v.ephemeralResources, FrameworkDataSources: v.frameworkDataSources, + FrameworkListResources: v.frameworkListResources, FrameworkResources: v.frameworkResources, SDKDataSources: v.sdkDataSources, SDKResources: v.sdkResources, + SDKListResources: v.sdkListResources, } var imports []goImport - for resource := range maps.Values(v.actions) { + for _, resource := range v.actions { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.ephemeralResources) { + for _, resource := range v.ephemeralResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.frameworkDataSources) { + for _, resource := range v.frameworkDataSources { + imports = append(imports, resource.goImports...) + } + for _, resource := range v.frameworkListResources { + imports = append(imports, resource.goImports...) + } + for _, resource := range v.frameworkResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.frameworkResources) { + for _, resource := range v.sdkDataSources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.sdkDataSources) { + for _, resource := range v.sdkResources { imports = append(imports, resource.goImports...) } - for resource := range maps.Values(v.sdkResources) { + for _, resource := range v.sdkListResources { imports = append(imports, resource.goImports...) 
} slices.SortFunc(imports, func(a, b goImport) int { @@ -240,9 +280,11 @@ type ServiceDatum struct { Actions map[string]ResourceDatum EphemeralResources map[string]ResourceDatum FrameworkDataSources map[string]ResourceDatum + FrameworkListResources map[string]ResourceDatum FrameworkResources map[string]ResourceDatum SDKDataSources map[string]ResourceDatum SDKResources map[string]ResourceDatum + SDKListResources map[string]ResourceDatum GoImports []goImport } @@ -266,12 +308,14 @@ type visitor struct { functionName string packageName string - actions map[string]ResourceDatum - ephemeralResources map[string]ResourceDatum - frameworkDataSources map[string]ResourceDatum - frameworkResources map[string]ResourceDatum - sdkDataSources map[string]ResourceDatum - sdkResources map[string]ResourceDatum + actions map[string]ResourceDatum + ephemeralResources map[string]ResourceDatum + frameworkDataSources map[string]ResourceDatum + frameworkListResources map[string]ResourceDatum + frameworkResources map[string]ResourceDatum + sdkDataSources map[string]ResourceDatum + sdkResources map[string]ResourceDatum + sdkListResources map[string]ResourceDatum } // processDir scans a single service package directory and processes contained Go sources files. 
@@ -679,6 +723,48 @@ func (v *visitor) processFuncDecl(funcDecl *ast.FuncDecl) { v.sdkResources[typeName] = d } + case "FrameworkListResource": + if len(args.Positional) == 0 { + v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + typeName := args.Positional[0] + + if !validTypeName.MatchString(typeName) { + v.errs = append(v.errs, fmt.Errorf("invalid type name (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + _, fOK := v.frameworkListResources[typeName] + _, sdkOK := v.sdkListResources[typeName] + if fOK || sdkOK { + v.errs = append(v.errs, fmt.Errorf("duplicate List Resource (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + } else { + v.frameworkListResources[typeName] = d + } + + case "SDKListResource": + if len(args.Positional) == 0 { + v.errs = append(v.errs, fmt.Errorf("no type name: %s", fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + typeName := args.Positional[0] + + if !validTypeName.MatchString(typeName) { + v.errs = append(v.errs, fmt.Errorf("invalid type name (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + continue + } + + _, fOK := v.frameworkListResources[typeName] + _, sdkOK := v.sdkListResources[typeName] + if fOK || sdkOK { + v.errs = append(v.errs, fmt.Errorf("duplicate List Resource (%s): %s", typeName, fmt.Sprintf("%s.%s", v.packageName, v.functionName))) + } else { + v.sdkListResources[typeName] = d + } + case "IdentityAttribute", "ArnIdentity", "ImportIDHandler", "MutableIdentity", "SingletonIdentity", "Region", "Tags", "WrappedImport", "V60SDKv2Fix", "IdentityFix", "CustomImport": // Handled above. 
case "ArnFormat", "IdAttrFormat", "NoImport", "Testing": diff --git a/internal/generate/servicepackage/service_package_gen.go.gtpl b/internal/generate/servicepackage/service_package_gen.go.gtpl index a1fbb0849a0e..5ce858c7011e 100644 --- a/internal/generate/servicepackage/service_package_gen.go.gtpl +++ b/internal/generate/servicepackage/service_package_gen.go.gtpl @@ -35,6 +35,8 @@ package {{ .ProviderPackage }} import ( "context" + "iter" + "slices" "unique" {{ if .GenerateClient }} @@ -268,6 +270,119 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser } } +{{ if .FrameworkListResources }} +func (p *servicePackage) FrameworkListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageFrameworkListResource] { + return slices.Values([]*inttypes.ServicePackageFrameworkListResource { +{{- range $key, $value := .FrameworkListResources }} + {{- $regionOverrideEnabled := and (not $.IsGlobal) $value.RegionOverrideEnabled }} + { + Factory: {{ $value.FactoryName }}, + TypeName: "{{ $key }}", + Name: "{{ $value.Name }}", + {{- if .TransparentTagging }} + Tags: unique.Make(inttypes.ServicePackageResourceTags { + {{- if ne .TagsIdentifierAttribute "" }} + IdentifierAttribute: {{ .TagsIdentifierAttribute }}, + {{- end }} + {{- if ne .TagsResourceType "" }} + ResourceType: "{{ .TagsResourceType }}", + {{- end }} + }), + {{- end }} + {{- if and $regionOverrideEnabled $value.ValidateRegionOverrideInPartition }} + Region: unique.Make(inttypes.ResourceRegionDefault()), + {{- else if not $regionOverrideEnabled }} + Region: unique.Make(inttypes.ResourceRegionDisabled()), + {{- else }} + Region: unique.Make(inttypes.ServicePackageResourceRegion { + IsOverrideEnabled: {{ $regionOverrideEnabled }}, + IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, + }), + {{- end }} + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: 
inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.GlobalARNIdentity( + {{- end }} + {{- else }} + {{- if $value.IsARNFormatGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalResourceWithGlobalARNFormat( + {{- end }} + {{- else }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalARNIdentity( + {{- end }} + {{- end }} + {{- end }} + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{ else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- end }} + }, +{{- end }} + }) +} +{{- end }} + func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { return []*inttypes.ServicePackageSDKDataSource { {{- range $key, $value := .SDKDataSources }} @@ -408,6 +523,120 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +{{ if .SDKListResources }} +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource { +{{- range $key, $value := .SDKListResources }} + {{- $regionOverrideEnabled := and (not $.IsGlobal) $value.RegionOverrideEnabled }} + { + Factory: {{ $value.FactoryName }}, + TypeName: "{{ $key }}", + Name: "{{ $value.Name }}", + {{- if and $regionOverrideEnabled $value.ValidateRegionOverrideInPartition }} + Region: unique.Make(inttypes.ResourceRegionDefault()), + {{- else if not $regionOverrideEnabled }} + Region: unique.Make(inttypes.ResourceRegionDisabled()), + {{- else }} + Region: unique.Make(inttypes.ServicePackageResourceRegion { + IsOverrideEnabled: {{ $regionOverrideEnabled }}, + IsValidateOverrideInPartition: {{ $value.ValidateRegionOverrideInPartition }}, + }), + {{- end }} + {{- if .TransparentTagging }} + Tags: unique.Make(inttypes.ServicePackageResourceTags { + {{- if ne .TagsIdentifierAttribute "" }} + 
IdentifierAttribute: {{ .TagsIdentifierAttribute }}, + {{- end }} + {{- if ne .TagsResourceType "" }} + ResourceType: "{{ .TagsResourceType }}", + {{- end }} + }), + {{- end }} + {{- if gt (len $value.IdentityAttributes) 1 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalParameterizedIdentity([]inttypes.IdentityAttribute{ + {{- range $value.IdentityAttributes }} + {{ template "IdentifierAttribute" . }} + {{- end }} + }, + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- else if gt (len $value.IdentityAttributes) 0 }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else }} + Identity: inttypes.RegionalSingleParameterIdentity( + {{- range $value.IdentityAttributes -}} + {{ .Name }}, + {{- end -}} + {{- template "CommonIdentityOpts" . 
-}} + ), + {{- end }} + {{- else if $value.ARNIdentity }} + {{- if $.IsGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.GlobalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.GlobalARNIdentity( + {{- end }} + {{- else }} + {{- if $value.IsARNFormatGlobal }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalResourceWithGlobalARNFormatNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalResourceWithGlobalARNFormat( + {{- end }} + {{- else }} + {{- if $value.HasARNAttribute }} + Identity: inttypes.RegionalARNIdentityNamed({{ $value.ARNAttribute }}, + {{- else }} + Identity: inttypes.RegionalARNIdentity( + {{- end }} + {{- end }} + {{- end }} + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- else if $value.SingletonIdentity }} + {{- if or $.IsGlobal $value.IsGlobal }} + Identity: inttypes.GlobalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{ else }} + Identity: inttypes.RegionalSingletonIdentity( + {{- if .HasIdentityDuplicateAttrs -}} + inttypes.WithIdentityDuplicateAttrs({{ range .IdentityDuplicateAttrs }}{{ . }}, {{ end }}), + {{- end -}} + {{- template "CommonIdentityOpts" . -}} + ), + {{- end }} + {{- end }} + }, +{{- end }} + }) +} +{{- end }} + + func (p *servicePackage) ServicePackageName() string { {{- if eq .ProviderPackage "meta" }} return "{{ .ProviderPackage }}" diff --git a/internal/iter/null.go b/internal/iter/null.go new file mode 100644 index 000000000000..ef85ebd92304 --- /dev/null +++ b/internal/iter/null.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package iter + +import ( + "iter" +) + +// Null returns an empty iterator. +func Null[V any]() iter.Seq[V] { + return func(yield func(V) bool) {} +} + +// Null2 returns an empty value pair iterator. +func Null2[K, V any]() iter.Seq2[K, V] { + return func(yield func(K, V) bool) {} +} diff --git a/internal/provider/framework/intercept.go b/internal/provider/framework/intercept.go index d70c408d7b38..2d4367f71df2 100644 --- a/internal/provider/framework/intercept.go +++ b/internal/provider/framework/intercept.go @@ -10,9 +10,12 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-framework/action" "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) @@ -256,8 +259,35 @@ func (s interceptorInvocations) resourceImportState() []interceptorFunc[resource }) } -// Only generate strings for use in tests -//go:generate stringer -type=when -output=when_string_test.go +type listInterceptorFunc[Request, Response any] func(context.Context, interceptorOptions[Request, Response]) diag.Diagnostics + +type listResourceListInterceptor interface { + list(context.Context, interceptorOptions[list.ListRequest, list.ListResultsStream]) diag.Diagnostics +} + +// resourceList returns a slice of interceptors that run on resource List. 
+func (s interceptorInvocations) resourceList() []listInterceptorFunc[list.ListRequest, list.ListResultsStream] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(listResourceListInterceptor) + return ok + }), func(e any) listInterceptorFunc[list.ListRequest, list.ListResultsStream] { + return e.(listResourceListInterceptor).list + }) +} + +type listResourceSchemaInterceptor interface { + schema(context.Context, interceptorOptions[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse]) +} + +// resourceListResourceConfigSchema returns a slice of interceptors that run on resource ListResourceConfigSchema. +func (s interceptorInvocations) resourceListResourceConfigSchema() []interceptorFunc[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse] { + return tfslices.ApplyToAll(tfslices.Filter(s, func(e any) bool { + _, ok := e.(listResourceSchemaInterceptor) + return ok + }), func(e any) interceptorFunc[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse] { + return e.(listResourceSchemaInterceptor).schema + }) +} // when represents the point in the CRUD request lifecycle that an interceptor is run. // Multiple values can be ORed together. @@ -270,6 +300,9 @@ const ( Finally // Interceptor is invoked after After or OnError ) +// Only generate strings for use in tests +//go:generate stringer -type=when -output=when_string_test.go + // An action interceptor is functionality invoked during the action's lifecycle. // If a Before interceptor returns Diagnostics indicating an error occurred then // no further interceptors in the chain are run and neither is the schema's method. @@ -320,7 +353,8 @@ type interceptedRequest interface { resource.UpdateRequest | resource.DeleteRequest | resource.ModifyPlanRequest | - resource.ImportStateRequest + resource.ImportStateRequest | + list.ListResourceSchemaRequest } // interceptedResponse represents a Plugin Framework response type that can be intercepted. 
@@ -339,7 +373,8 @@ type interceptedResponse interface { resource.UpdateResponse | resource.DeleteResponse | resource.ModifyPlanResponse | - resource.ImportStateResponse + resource.ImportStateResponse | + list.ListResourceSchemaResponse } type innerFunc[Request, Response any] func(ctx context.Context, request Request, response *Response) @@ -444,3 +479,73 @@ func actionSchemaHasError(response *action.SchemaResponse) bool { func actionInvokeHasError(response *action.InvokeResponse) bool { return response.Diagnostics.HasError() } + +func listResourceConfigSchemaHasError(response *list.ListResourceSchemaResponse) bool { + return response.Diagnostics.HasError() +} + +func interceptedListHandler(interceptors []listInterceptorFunc[list.ListRequest, list.ListResultsStream], f func(context.Context, list.ListRequest, *list.ListResultsStream), c awsClient) func(context.Context, list.ListRequest, *list.ListResultsStream) { + return func(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + opts := interceptorOptions[list.ListRequest, list.ListResultsStream]{ + c: c, + request: &request, + response: stream, + } + + // Before interceptors are run first to last. + opts.when = Before + for v := range slices.Values(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return + } + } + + // Stash `stream.Results` so that inner function can be unaware of interceptors. + resultStream := stream.Results + stream.Results = nil + + f(ctx, request, stream) + innerResultStream := stream.Results + + stream.Results = tfiter.Concat(resultStream, func(yield func(list.ListResult) bool) { + var hasError bool + for v := range innerResultStream { + if v.Diagnostics.HasError() { + hasError = true + } + if !yield(v) { + return + } + } + + // All other interceptors are run last to first. 
+ if hasError { + opts.when = OnError + } else { + opts.when = After + } + for v := range tfslices.BackwardValues(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + if !yield(list.ListResult{Diagnostics: diags}) { + return + } + } + } + + opts.when = Finally + for v := range tfslices.BackwardValues(interceptors) { + diags := v(ctx, opts) + if len(diags) > 0 { + if !yield(list.ListResult{Diagnostics: diags}) { + return + } + } + } + }) + } +} diff --git a/internal/provider/framework/intercept_test.go b/internal/provider/framework/intercept_test.go index 995dae00f38d..b0b44dc44860 100644 --- a/internal/provider/framework/intercept_test.go +++ b/internal/provider/framework/intercept_test.go @@ -10,610 +10,609 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/resource" ) -func TestInterceptedHandler_Diags_FirstHasBeforeError(t *testing.T) { +func TestInterceptedHandler(t *testing.T) { t.Parallel() - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), - }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{}) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - client := mockClient{ accountID: "123456789012", region: "us-west-2", //lintignore:AWSAT003 } - var f mockInnerFunc - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - - ctx := t.Context() - var 
request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + testcases := map[string]struct { + firstInterceptorDiags map[when]diag.Diagnostics + secondInterceptorDiags map[when]diag.Diagnostics + innerFuncDiags diag.Diagnostics + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedDiags diag.Diagnostics + }{ + "First has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, }, - } - - handler(ctx, request, &response) - - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if !slices.Equal(first.called, []when{Before}) { - t.Errorf("expected first interceptor to be called once, got %v", first.called) - } - if !slices.Equal(second.called, []when{}) { - t.Errorf("expected second interceptor to not be called, got %v", second.called) - } - if f.count != 0 { - t.Errorf("expected inner function to not be called, got %d", f.count) - } -} -func TestInterceptedHandler_Diags_SecondHasBeforeError(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{}) - second := 
newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + "Second has Before error": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, }, - }) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - - var f mockInnerFunc - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, }, - } - - handler(ctx, request, &response) - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", 
diff) - } - - if !slices.Equal(first.called, []when{Before}) { - t.Errorf("expected first interceptor to be called once, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before}) { - t.Errorf("expected second interceptor to be called once, got %v", second.called) - } - if f.count != 0 { - t.Errorf("expected inner function to not be called, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_FirstHasBeforeWarning(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + "Second has Before warning": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{}) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - var f mockInnerFunc - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - - ctx := t.Context() - var request resource.SchemaRequest - response 
:= resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "First has Before warning Second has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, }, - } - - handler(ctx, request, &response) - - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if !slices.Equal(first.called, []when{Before, After, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, After, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_SecondHasBeforeWarning(t *testing.T) { - t.Parallel() - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in 
the second interceptor Before handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{}) - second := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + "Inner has error": { + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, }, - }) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - - var f mockInnerFunc - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "Inner has warning": { + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, }, - } - - handler(ctx, request, &response) - - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if 
!slices.Equal(first.called, []when{Before, After, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, After, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_FirstHasBeforeWarning_SecondHasBeforeError(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), - } - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + "Inner has error First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in 
the first interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, }, - }) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - - var f mockInnerFunc - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "All have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, 
After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, }, - } - - handler(ctx, request, &response) - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) + "Inner has error Handlers have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError 
handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, } - if !slices.Equal(first.called, []when{Before}) { - t.Errorf("expected first interceptor to be called once, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before}) { - t.Errorf("expected second interceptor to be called once, got %v", second.called) - } - if f.count != 0 { - t.Errorf("expected inner function to not be called, got %d", f.count) + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockInterceptor(tc.firstInterceptorDiags) + second := newMockInterceptor(tc.secondInterceptorDiags) + interceptors := []interceptorFunc[resource.SchemaRequest, 
resource.SchemaResponse]{ + first.Intercept, + second.Intercept, + } + + f := newMockInnerFunc(tc.innerFuncDiags) + + handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) + + ctx := t.Context() + var request resource.SchemaRequest + response := resource.SchemaResponse{ + Diagnostics: diag.Diagnostics{ + diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + }, + } + tc.expectedDiags = slices.Insert(tc.expectedDiags, 0, diag.Diagnostic(diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"))) + + handler(ctx, request, &response) + + if diff := cmp.Diff(response.Diagnostics, tc.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) } } -func TestInterceptedHandler_Diags_InnerHasError(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), - } - - first := mockInterceptor{} - second := mockInterceptor{} - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } 
- - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } +type mockInterceptor struct { + diags map[when]diag.Diagnostics + called []when +} - var f mockInnerFunc - f.diags = diag.Diagnostics{ - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), +func newMockInterceptor(diags map[when]diag.Diagnostics) *mockInterceptor { + return &mockInterceptor{ + diags: diags, } - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) +} - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - }, - } +func (m *mockInterceptor) Intercept(ctx context.Context, opts interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) { + m.called = append(m.called, opts.when) + opts.response.Diagnostics.Append(m.diags[opts.when]...) +} - handler(ctx, request, &response) +type mockInnerFunc struct { + diags diag.Diagnostics + count int +} - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) +func newMockInnerFunc(diags diag.Diagnostics) mockInnerFunc { + return mockInnerFunc{ + diags: diags, } +} - if !slices.Equal(first.called, []when{Before, OnError, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, OnError, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } +func (m *mockInnerFunc) Call(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + m.count++ + response.Diagnostics.Append(m.diags...) 
} -func TestInterceptedHandler_Diags_InnerHasWarning(t *testing.T) { +func TestInterceptedListHandler(t *testing.T) { t.Parallel() - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), - } - - first := mockInterceptor{} - second := mockInterceptor{} - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - client := mockClient{ accountID: "123456789012", region: "us-west-2", //lintignore:AWSAT003 } - var f mockInnerFunc - f.diags = diag.Diagnostics{ - diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), - } - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + testcases := map[string]struct { + firstInterceptorDiags map[when]diag.Diagnostics + secondInterceptorDiags map[when]diag.Diagnostics + innerFuncDiags diag.Diagnostics + expectedFirstCalls []when + expectedSecondCalls []when + expectedInnerCalls int + expectedDiags diag.Diagnostics + }{ + "First has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("First interceptor Before error", "An error occurred in the first interceptor Before handler"), + }, }, - } - handler(ctx, request, &response) - - if diff := 
cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if !slices.Equal(first.called, []when{Before, After, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, After, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_InnerHasError_FirstHasBeforeWarning(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + "Second has Before error": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{}) - - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - 
region: "us-west-2", //lintignore:AWSAT003 - } - - var f mockInnerFunc - f.diags = diag.Diagnostics{ - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), - } - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, }, - } - - handler(ctx, request, &response) - - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if !slices.Equal(first.called, []when{Before, OnError, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, OnError, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_AllHaveWarnings(t *testing.T) { - t.Parallel() - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("First interceptor 
Before warning", "A warning occurred in the first interceptor Before handler"), - diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), - diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), - diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), - diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), - diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), - diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - }, - After: { - diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + "Second has Before warning": { + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, }, - Finally: { - diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), - }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("Second interceptor Before warning", "A 
warning occurred in the second interceptor Before handler"), - }, - After: { - diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), - }, - Finally: { - diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), - }, - }) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - var f mockInnerFunc - f.diags = diag.Diagnostics{ - diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), - } - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) - - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "First has Before warning Second has Before error": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, + }, + expectedFirstCalls: []when{Before}, + expectedSecondCalls: []when{Before}, + expectedInnerCalls: 0, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Second interceptor Before error", "An error occurred in the second interceptor Before handler"), + }, }, - } - - handler(ctx, request, &response) - 
if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) - } - - if !slices.Equal(first.called, []when{Before, After, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, After, Finally}) { - t.Errorf("expected second interceptor to be called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) - } -} - -func TestInterceptedHandler_Diags_InnerHasError_HandlersHaveWarnings(t *testing.T) { - t.Parallel() - - expectedDiags := diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), - diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), - diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), - diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), - diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), - } - - first := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), - }, - OnError: { - diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning 
occurred in the first interceptor OnError handler"), - }, - Finally: { - diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), - }, - }) - second := newMockInterceptor(map[when]diag.Diagnostics{ - Before: { - diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + "Inner has error": { + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, }, - OnError: { - diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), - }, - Finally: { - diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), - }, - }) - interceptors := []interceptorFunc[resource.SchemaRequest, resource.SchemaResponse]{ - first.Intercept, - second.Intercept, - } - - client := mockClient{ - accountID: "123456789012", - region: "us-west-2", //lintignore:AWSAT003 - } - var f mockInnerFunc - f.diags = diag.Diagnostics{ - diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), - } - handler := interceptedHandler(interceptors, f.Call, resourceSchemaHasError, client) + "Inner has warning": { + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function 
warning", "A warning occurred in the inner function"), + }, + }, - ctx := t.Context() - var request resource.SchemaRequest - response := resource.SchemaResponse{ - Diagnostics: diag.Diagnostics{ - diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + "Inner has error First has Before warning": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, }, - } - handler(ctx, request, &response) + "All have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + After: { + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After 
handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, After, Finally}, + expectedSecondCalls: []when{Before, After, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewWarningDiagnostic("Inner function warning", "A warning occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor After warning", "A warning occurred in the second interceptor After handler"), + diag.NewWarningDiagnostic("First interceptor After warning", "A warning occurred in the first interceptor After handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, - if diff := cmp.Diff(response.Diagnostics, expectedDiags); diff != "" { - t.Errorf("unexpected diagnostics difference: %s", diff) + "Inner has error Handlers have warnings": { + firstInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor 
Finally handler"), + }, + }, + secondInterceptorDiags: map[when]diag.Diagnostics{ + Before: { + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + }, + OnError: { + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + }, + Finally: { + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + }, + }, + innerFuncDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + }, + expectedFirstCalls: []when{Before, OnError, Finally}, + expectedSecondCalls: []when{Before, OnError, Finally}, + expectedInnerCalls: 1, + expectedDiags: diag.Diagnostics{ + diag.NewWarningDiagnostic("First interceptor Before warning", "A warning occurred in the first interceptor Before handler"), + diag.NewWarningDiagnostic("Second interceptor Before warning", "A warning occurred in the second interceptor Before handler"), + diag.NewErrorDiagnostic("Inner function error", "An error occurred in the inner function"), + diag.NewWarningDiagnostic("Second interceptor OnError warning", "A warning occurred in the second interceptor OnError handler"), + diag.NewWarningDiagnostic("First interceptor OnError warning", "A warning occurred in the first interceptor OnError handler"), + diag.NewWarningDiagnostic("Second interceptor Finally warning", "A warning occurred in the second interceptor Finally handler"), + diag.NewWarningDiagnostic("First interceptor Finally warning", "A warning occurred in the first interceptor Finally handler"), + }, + }, } - if !slices.Equal(first.called, []when{Before, OnError, Finally}) { - t.Errorf("expected first interceptor to be called three times, got %v", first.called) - } - if !slices.Equal(second.called, []when{Before, OnError, Finally}) { - t.Errorf("expected second interceptor to be 
called three times, got %v", second.called) - } - if f.count != 1 { - t.Errorf("expected inner function to be called once, got %d", f.count) + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + first := newMockListInterceptor(tc.firstInterceptorDiags) + second := newMockListInterceptor(tc.secondInterceptorDiags) + interceptors := []listInterceptorFunc[list.ListRequest, list.ListResultsStream]{ + first.Intercept, + second.Intercept, + } + + f := newMockInnerListFunc(tc.innerFuncDiags) + + handler := interceptedListHandler(interceptors, f.Call, client) + + ctx := t.Context() + var request list.ListRequest + response := list.ListResultsStream{ + Results: list.ListResultsStreamDiagnostics(diag.Diagnostics{ + diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"), + }), + } + tc.expectedDiags = slices.Insert(tc.expectedDiags, 0, diag.Diagnostic(diag.NewWarningDiagnostic("Pre-existing warning", "This is a pre-existing warning that should not be affected by the interceptors"))) + + handler(ctx, request, &response) + + var diags diag.Diagnostics + for d := range response.Results { + if len(d.Diagnostics) > 0 { + diags = append(diags, d.Diagnostics...) 
+ } + } + + if diff := cmp.Diff(diags, tc.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diff := cmp.Diff(first.called, tc.expectedFirstCalls); diff != "" { + t.Errorf("unexpected first interceptor calls difference: %s", diff) + } + if diff := cmp.Diff(second.called, tc.expectedSecondCalls); diff != "" { + t.Errorf("unexpected second interceptor calls difference: %s", diff) + } + if tc.expectedInnerCalls == 0 { + if f.count != 0 { + t.Errorf("expected inner function to not be called, got %d", f.count) + } + } else { + if f.count != tc.expectedInnerCalls { + t.Errorf("expected inner function to be called %d times, got %d", tc.expectedInnerCalls, f.count) + } + } + }) } } -type mockInterceptor struct { +type mockListInterceptor struct { diags map[when]diag.Diagnostics called []when } -func newMockInterceptor(diags map[when]diag.Diagnostics) *mockInterceptor { - return &mockInterceptor{ +func newMockListInterceptor(diags map[when]diag.Diagnostics) *mockListInterceptor { + return &mockListInterceptor{ diags: diags, } } -func (m *mockInterceptor) Intercept(ctx context.Context, opts interceptorOptions[resource.SchemaRequest, resource.SchemaResponse]) { +func (m *mockListInterceptor) Intercept(ctx context.Context, opts interceptorOptions[list.ListRequest, list.ListResultsStream]) diag.Diagnostics { m.called = append(m.called, opts.when) - // return m.diags[opts.when] - opts.response.Diagnostics.Append(m.diags[opts.when]...) 
+ return m.diags[opts.when] } -type mockInnerFunc struct { +type mockInnerListFunc struct { diags diag.Diagnostics count int } -func (m *mockInnerFunc) Call(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { +func newMockInnerListFunc(diags diag.Diagnostics) mockInnerListFunc { + return mockInnerListFunc{ + diags: diags, + } +} + +func (m *mockInnerListFunc) Call(ctx context.Context, request list.ListRequest, response *list.ListResultsStream) { m.count++ - response.Diagnostics.Append(m.diags...) + if len(m.diags) > 0 { + response.Results = list.ListResultsStreamDiagnostics(m.diags) + } else { + response.Results = list.NoListResults + } } diff --git a/internal/provider/framework/listresource/list_result_intercept.go b/internal/provider/framework/listresource/list_result_intercept.go new file mode 100644 index 000000000000..c6e316e88aec --- /dev/null +++ b/internal/provider/framework/listresource/list_result_intercept.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package listresource + +import ( + "context" + "fmt" + "unique" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/internal/provider/interceptors" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// when represents the point in the CRUD request lifecycle that an interceptor is run. 
+// Multiple values can be ORed together. +type when uint16 + +const ( + Before when = 1 << iota // Interceptor is invoked before call to method in schema + After // Interceptor is invoked after successful call to method in schema + OnError // Interceptor is invoked after unsuccessful call to method in schema + Finally // Interceptor is invoked after After or OnError +) + +type InterceptorParams struct { + C *conns.AWSClient + Result *list.ListResult + When when +} + +type ListResultInterceptor interface { + Read(ctx context.Context, params InterceptorParams) diag.Diagnostics +} + +// TODO: this could be unique as well +type tagsInterceptor struct { + interceptors.HTags +} + +func TagsInterceptor(tags unique.Handle[inttypes.ServicePackageResourceTags]) tagsInterceptor { + return tagsInterceptor{ + HTags: interceptors.HTags(tags), + } +} + +// Copied from tagsResourceInterceptor.read() +func (r tagsInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + sp, serviceName, resourceName, tagsInContext, ok := interceptors.InfoFromContext(ctx, params.C) + if !ok { + return diags + } + + switch params.When { + case After: + // If the R handler didn't set tags, try and read them from the service API. + if tagsInContext.TagsOut.IsNone() { + // Some old resources may not have the required attribute set after Read: + // https://github.com/hashicorp/terraform-provider-aws/issues/31180 + if identifier := r.GetIdentifierFramework(ctx, params.Result.Resource); identifier != "" { + if err := r.ListTags(ctx, sp, params.C, identifier); err != nil { + diags.AddError(fmt.Sprintf("listing tags for %s %s (%s)", serviceName, resourceName, identifier), err.Error()) + + return diags + } + } + } + + apiTags := tagsInContext.TagsOut.UnwrapOrDefault() + + // AWS APIs often return empty lists of tags when none have been configured. 
+ var stateTags tftags.Map + params.Result.Resource.GetAttribute(ctx, path.Root(names.AttrTags), &stateTags) + // Remove any provider configured ignore_tags and system tags from those returned from the service API. + // The resource's configured tags do not include any provider configured default_tags. + if v := apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(params.C.IgnoreTagsConfig(ctx)).ResolveDuplicatesFramework(ctx, params.C.DefaultTagsConfig(ctx), params.C.IgnoreTagsConfig(ctx), stateTags, &diags).Map(); len(v) > 0 { + stateTags = tftags.NewMapFromMapValue(fwflex.FlattenFrameworkStringValueMapLegacy(ctx, v)) + } + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrTags), &stateTags)...) + if diags.HasError() { + return diags + } + + // Computed tags_all do. + stateTagsAll := fwflex.FlattenFrameworkStringValueMapLegacy(ctx, apiTags.IgnoreSystem(sp.ServicePackageName()).IgnoreConfig(params.C.IgnoreTagsConfig(ctx)).Map()) + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrTagsAll), tftags.NewMapFromMapValue(stateTagsAll))...) + if diags.HasError() { + return diags + } + } + + return diags +} + +type identityInterceptor struct { + attributes []inttypes.IdentityAttribute +} + +func IdentityInterceptor(attributes []inttypes.IdentityAttribute) identityInterceptor { + return identityInterceptor{ + attributes: attributes, + } +} + +func (r identityInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + awsClient := params.C + + switch params.When { + // The Before step is not needed if Framework pre-populates the Identity as it does with CRUD operations + case Before: + identityType := params.Result.Identity.Schema.Type() + + obj, d := newEmptyObject(identityType) + diags.Append(d...) + if diags.HasError() { + return diags + } + + diags.Append(params.Result.Identity.Set(ctx, obj)...) 
+ if diags.HasError() { + return diags + } + + case After: + for _, att := range r.attributes { + switch att.Name() { + case names.AttrAccountID: + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.AccountID(ctx))...) + if diags.HasError() { + return diags + } + + case names.AttrRegion: + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), awsClient.Region(ctx))...) + if diags.HasError() { + return diags + } + + default: + var attrVal attr.Value + diags.Append(params.Result.Resource.GetAttribute(ctx, path.Root(att.ResourceAttributeName()), &attrVal)...) + if diags.HasError() { + return diags + } + + diags.Append(params.Result.Identity.SetAttribute(ctx, path.Root(att.Name()), attrVal)...) + if diags.HasError() { + return diags + } + } + } + } + + return diags +} + +func newEmptyObject(typ attr.Type) (obj basetypes.ObjectValue, diags diag.Diagnostics) { + i, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + diags.AddError( + "Internal Error", + "An unexpected error occurred. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected value type to implement attr.TypeWithAttributeTypes, got: %T", typ), + ) + return + } + + attrTypes := i.AttributeTypes() + attrValues := make(map[string]attr.Value, len(attrTypes)) + // TODO: only handles string types + for attrName := range attrTypes { + attrValues[attrName] = types.StringNull() + } + obj, d := basetypes.NewObjectValue(attrTypes, attrValues) + diags.Append(d...) 
+ if d.HasError() { + return basetypes.ObjectValue{}, diags + } + + return obj, diags +} + +type setRegionInterceptor struct{} + +func SetRegionInterceptor() setRegionInterceptor { + return setRegionInterceptor{} +} + +// Copied from resourceSetRegionInStateInterceptor.read() +func (r setRegionInterceptor) Read(ctx context.Context, params InterceptorParams) diag.Diagnostics { + var diags diag.Diagnostics + + switch params.When { + case After: + diags.Append(params.Result.Resource.SetAttribute(ctx, path.Root(names.AttrRegion), params.C.Region(ctx))...) + if diags.HasError() { + return diags + } + } + + return diags +} diff --git a/internal/provider/framework/listresourceattribute/attributes.go b/internal/provider/framework/listresourceattribute/attributes.go new file mode 100644 index 000000000000..5dbd4c5c83d7 --- /dev/null +++ b/internal/provider/framework/listresourceattribute/attributes.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package listresourceattribute + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-provider-aws/names" +) + +var Region = sync.OnceValue(func() schema.Attribute { + return schema.StringAttribute{ + Optional: true, + Description: names.ListResourceTopLevelRegionAttributeDescription, + } +}) diff --git a/internal/provider/framework/provider.go b/internal/provider/framework/provider.go index 2c70f3bd53da..36df45dc55f8 100644 --- a/internal/provider/framework/provider.go +++ b/internal/provider/framework/provider.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/ephemeral" empemeralschema "github.com/hashicorp/terraform-plugin-framework/ephemeral/schema" "github.com/hashicorp/terraform-plugin-framework/function" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" 
"github.com/hashicorp/terraform-plugin-framework/resource" @@ -47,12 +48,14 @@ var ( _ provider.ProviderWithActions = &frameworkProvider{} _ provider.ProviderWithFunctions = &frameworkProvider{} _ provider.ProviderWithEphemeralResources = &frameworkProvider{} + _ provider.ProviderWithListResources = &frameworkProvider{} ) type frameworkProvider struct { actions []func() action.Action dataSources []func() datasource.DataSource ephemeralResources []func() ephemeral.EphemeralResource + listResources []func() list.ListResource primary interface{ Meta() any } resources []func() resource.Resource servicePackages iter.Seq[conns.ServicePackage] @@ -350,6 +353,7 @@ func (p *frameworkProvider) Configure(ctx context.Context, request provider.Conf response.ResourceData = v response.EphemeralResourceData = v response.ActionData = v + response.ListResourceData = v } // DataSources returns a slice of functions to instantiate each DataSource @@ -397,6 +401,10 @@ func (p *frameworkProvider) Functions(_ context.Context) []func() function.Funct } } +func (p *frameworkProvider) ListResources(_ context.Context) []func() list.ListResource { + return slices.Clone(p.listResources) +} + // initialize is called from `New` to perform any Terraform Framework-style initialization. 
func (p *frameworkProvider) initialize(ctx context.Context) { log.Printf("Initializing Terraform AWS Provider (Framework-style)...") @@ -418,6 +426,21 @@ func (p *frameworkProvider) initialize(ctx context.Context) { } } + if v, ok := sp.(conns.ServicePackageWithFrameworkListResources); ok { + for listResourceSpec := range v.FrameworkListResources(ctx) { + p.listResources = append(p.listResources, func() list.ListResource { //nolint:contextcheck // must be a func() + return newWrappedListResourceFramework(listResourceSpec, servicePackageName) + }) + } + } + if v, ok := sp.(conns.ServicePackageWithSDKListResources); ok { + for listResourceSpec := range v.SDKListResources(ctx) { + p.listResources = append(p.listResources, func() list.ListResource { //nolint:contextcheck // must be a func() + return newWrappedListResourceSDK(listResourceSpec, servicePackageName) + }) + } + } + for _, resourceSpec := range sp.FrameworkResources(ctx) { p.resources = append(p.resources, func() resource.Resource { //nolint:contextcheck // must be a func() return newWrappedResource(resourceSpec, servicePackageName) diff --git a/internal/provider/framework/region.go b/internal/provider/framework/region.go index 8243c4668777..32302199366a 100644 --- a/internal/provider/framework/region.go +++ b/internal/provider/framework/region.go @@ -14,9 +14,11 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/ephemeral" erschema "github.com/hashicorp/terraform-plugin-framework/ephemeral/schema" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresourceattribute" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/resourceattribute" 
"github.com/hashicorp/terraform-provider-aws/names" ) @@ -41,7 +43,7 @@ func (r dataSourceInjectRegionAttributeInterceptor) schema(ctx context.Context, response.Schema.Attributes[names.AttrRegion] = dsschema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } } } @@ -110,7 +112,7 @@ func (r ephemeralResourceInjectRegionAttributeInterceptor) schema(ctx context.Co response.Schema.Attributes[names.AttrRegion] = erschema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } } } @@ -379,7 +381,7 @@ func (a actionInjectRegionAttributeInterceptor) schema(ctx context.Context, opts } response.Schema.Attributes[names.AttrRegion] = aschema.StringAttribute{ Optional: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ActionTopLevelRegionAttributeDescription, } } } @@ -406,3 +408,20 @@ func (a actionValidateRegionInterceptor) invoke(ctx context.Context, opts interc func actionValidateRegion() actionInvokeInterceptor { return &actionValidateRegionInterceptor{} } + +type listResourceInjectRegionAttributeInterceptor struct{} + +func (r listResourceInjectRegionAttributeInterceptor) schema(ctx context.Context, opts interceptorOptions[list.ListResourceSchemaRequest, list.ListResourceSchemaResponse]) { + switch response, when := opts.response, opts.when; when { + case After: + if _, ok := response.Schema.Attributes[names.AttrRegion]; !ok { + // Inject a top-level "region" attribute. + response.Schema.Attributes[names.AttrRegion] = listresourceattribute.Region() + } + } +} + +// listResourceInjectRegionAttribute injects a "region" attribute into a resource's List schema. 
+func listResourceInjectRegionAttribute() listResourceSchemaInterceptor { + return &listResourceInjectRegionAttributeInterceptor{} +} diff --git a/internal/provider/framework/resourceattribute/attributes.go b/internal/provider/framework/resourceattribute/attributes.go index eb5543522b84..f216b24d87b9 100644 --- a/internal/provider/framework/resourceattribute/attributes.go +++ b/internal/provider/framework/resourceattribute/attributes.go @@ -14,6 +14,6 @@ var Region = sync.OnceValue(func() schema.Attribute { return schema.StringAttribute{ Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } }) diff --git a/internal/provider/framework/wrap.go b/internal/provider/framework/wrap.go index acb0de29adcc..d5f56a5adb93 100644 --- a/internal/provider/framework/wrap.go +++ b/internal/provider/framework/wrap.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" @@ -18,9 +19,11 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" "github.com/hashicorp/terraform-provider-aws/internal/logging" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/identity" "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/importer" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" tfunique "github.com/hashicorp/terraform-provider-aws/internal/unique" @@ -545,15 +548,18 @@ func newWrappedResource(spec *inttypes.ServicePackageFrameworkResource, serviceP interceptors = append(interceptors, resourceTransparentTagging(spec.Tags)) } + inner, _ := spec.Factory(context.TODO()) + if len(spec.Identity.Attributes) > 0 { interceptors = append(interceptors, newIdentityInterceptor(spec.Identity.Attributes)) + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } } - inner, _ := spec.Factory(context.TODO()) - if spec.Import.WrappedImport { if v, ok := inner.(framework.ImportByIdentityer); ok { - v.SetIdentitySpec(spec.Identity, spec.Import) + v.SetImportSpec(spec.Import) } // If the resource does not implement framework.ImportByIdentityer, // it will be caught by `validateResourceSchemas`, so we can ignore it here. @@ -787,3 +793,266 @@ func (w *wrappedResource) IdentitySchema(ctx context.Context, req resource.Ident resp.IdentitySchema = identity.NewIdentitySchema(w.spec.Identity) } } + +type wrappedListResourceFramework struct { + inner list.ListResourceWithConfigure + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageFrameworkListResource + interceptors interceptorInvocations +} + +var _ list.ListResourceWithConfigure = &wrappedListResourceFramework{} + +func newWrappedListResourceFramework(spec *inttypes.ServicePackageFrameworkListResource, servicePackageName string) list.ListResourceWithConfigure { + var interceptors interceptorInvocations + + var isRegionOverrideEnabled bool + if regionSpec := spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled { + interceptors = append(interceptors, listResourceInjectRegionAttribute()) + // TODO: validate region in 
partition, needs tweaked error message + } + + inner := spec.Factory() + + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } + + if v, ok := inner.(framework.Lister); ok { + if isRegionOverrideEnabled { + v.AppendResultInterceptor(listresource.SetRegionInterceptor()) + } + + v.AppendResultInterceptor(listresource.IdentityInterceptor(spec.Identity.Attributes)) + + if !tfunique.IsHandleNil(spec.Tags) { + v.AppendResultInterceptor(listresource.TagsInterceptor(spec.Tags)) + } + } + + return &wrappedListResourceFramework{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } +} + +// context is run on all wrapped methods before any interceptors. +func (w *wrappedListResourceFramework) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) 
+ if diags.HasError() { + return ctx, diags + } + + if target.IsNull() || target.IsUnknown() { + overrideRegion = c.AwsConfig(ctx).Region + } else { + overrideRegion = target.ValueString() + } + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + +func (w *wrappedListResourceFramework) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + w.meta = v + } + + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + w.inner.Configure(ctx, request, response) +} + +func (w *wrappedListResourceFramework) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + stream.Results = tfiter.Null[list.ListResult]() + + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return + } + + interceptedListHandler(w.interceptors.resourceList(), w.inner.List, w.meta)(ctx, request, stream) +} + +// ListResourceConfigSchema implements list.ListResourceWithConfigure. +func (w *wrappedListResourceFramework) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + interceptedHandler(w.interceptors.resourceListResourceConfigSchema(), w.inner.ListResourceConfigSchema, listResourceConfigSchemaHasError, w.meta)(ctx, request, response) +} + +// Metadata implements list.ListResourceWithConfigure. +func (w *wrappedListResourceFramework) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + // This method does not call down to the inner resource. + response.TypeName = w.spec.TypeName +} + +type wrappedListResourceSDK struct { + inner inttypes.ListResourceForSDK + meta *conns.AWSClient + servicePackageName string + spec *inttypes.ServicePackageSDKListResource + interceptors interceptorInvocations +} + +var _ inttypes.ListResourceForSDK = &wrappedListResourceSDK{} + +func newWrappedListResourceSDK(spec *inttypes.ServicePackageSDKListResource, servicePackageName string) inttypes.ListResourceForSDK { + var interceptors interceptorInvocations + + if v := spec.Region; !tfunique.IsHandleNil(v) && v.Value().IsOverrideEnabled { + interceptors = append(interceptors, listResourceInjectRegionAttribute()) + // TODO: validate region in partition, needs tweaked error message + } + + inner := spec.Factory() + + if v, ok := inner.(framework.WithRegionSpec); ok { + v.SetRegionSpec(spec.Region) + } + + if v, ok := inner.(framework.Identityer); ok { + v.SetIdentitySpec(spec.Identity) + } + + if v, ok := inner.(inttypes.SDKv2Tagger); ok { + if !tfunique.IsHandleNil(spec.Tags) { + v.SetTagsSpec(spec.Tags) + } + } + + return &wrappedListResourceSDK{ + inner: inner, + servicePackageName: servicePackageName, + spec: spec, + interceptors: interceptors, + } +} + +// Metadata implements list.ListResourceWithConfigure. +func (w *wrappedListResourceSDK) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + // This method does not call down to the inner resource. 
+ response.TypeName = w.spec.TypeName +} + +// context is run on all wrapped methods before any interceptors. +func (w *wrappedListResourceSDK) context(ctx context.Context, getAttribute getAttributeFunc, c *conns.AWSClient) (context.Context, diag.Diagnostics) { + var diags diag.Diagnostics + var overrideRegion string + + var isRegionOverrideEnabled bool + if regionSpec := w.spec.Region; !tfunique.IsHandleNil(regionSpec) && regionSpec.Value().IsOverrideEnabled { + isRegionOverrideEnabled = true + } + + if isRegionOverrideEnabled && getAttribute != nil { + var target types.String + diags.Append(getAttribute(ctx, path.Root(names.AttrRegion), &target)...) + if diags.HasError() { + return ctx, diags + } + + if target.IsNull() || target.IsUnknown() { + overrideRegion = c.AwsConfig(ctx).Region + } else { + overrideRegion = target.ValueString() + } + } + + ctx = conns.NewResourceContext(ctx, w.servicePackageName, w.spec.Name, overrideRegion) + if c != nil { + ctx = tftags.NewContext(ctx, c.DefaultTagsConfig(ctx), c.IgnoreTagsConfig(ctx)) + ctx = c.RegisterLogger(ctx) + ctx = fwflex.RegisterLogger(ctx) + } + + return ctx, diags +} + +func (w *wrappedListResourceSDK) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + if v, ok := request.ProviderData.(*conns.AWSClient); ok { + w.meta = v + } + + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } + + w.inner.Configure(ctx, request, response) +} + +func (w *wrappedListResourceSDK) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + stream.Results = tfiter.Null[list.ListResult]() + + ctx, diags := w.context(ctx, request.Config.GetAttribute, w.meta) + if len(diags) > 0 { + stream.Results = tfiter.Concat(stream.Results, list.ListResultsStreamDiagnostics(diags)) + } + if diags.HasError() { + return + } + + interceptedListHandler(w.interceptors.resourceList(), w.inner.List, w.meta)(ctx, request, stream) +} + +// ListResourceConfigSchema implements list.ListResourceWithConfigure. +func (w *wrappedListResourceSDK) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + ctx, diags := w.context(ctx, nil, w.meta) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + interceptedHandler(w.interceptors.resourceListResourceConfigSchema(), w.inner.ListResourceConfigSchema, listResourceConfigSchemaHasError, w.meta)(ctx, request, response) +} + +func (w *wrappedListResourceSDK) RawV5Schemas(ctx context.Context, request list.RawV5SchemaRequest, response *list.RawV5SchemaResponse) { + if v, ok := w.inner.(list.ListResourceWithRawV5Schemas); ok { + ctx, diags := w.context(ctx, nil, w.meta) + if diags.HasError() { + tflog.Warn(ctx, "wrapping Schemas", map[string]any{ + "resource": w.spec.TypeName, + "bootstrapContext error": fwdiag.DiagnosticsString(diags), + }) + } + + v.RawV5Schemas(ctx, request, response) + } +} diff --git a/internal/provider/sdkv2/internal/attribute/attributes.go b/internal/provider/sdkv2/internal/attribute/attributes.go index 8dbe4a46850c..b9e9af2dc5a5 100644 --- a/internal/provider/sdkv2/internal/attribute/attributes.go +++ b/internal/provider/sdkv2/internal/attribute/attributes.go @@ -15,6 +15,6 @@ var Region = sync.OnceValue(func() *schema.Schema { 
Type: schema.TypeString, Optional: true, Computed: true, - Description: names.TopLevelRegionAttributeDescription, + Description: names.ResourceTopLevelRegionAttributeDescription, } }) diff --git a/internal/service/appsync/appsync_test.go b/internal/service/appsync/appsync_test.go index 321f2c8ccff8..e49f4b9ec293 100644 --- a/internal/service/appsync/appsync_test.go +++ b/internal/service/appsync/appsync_test.go @@ -18,60 +18,6 @@ func TestAccAppSync_serial(t *testing.T) { "description": testAccAPIKey_description, "expires": testAccAPIKey_expires, }, - "DataSource": { - acctest.CtBasic: testAccDataSource_basic, - "description": testAccDataSource_description, - "DynamoDB_region": testAccDataSource_DynamoDB_region, - "DynamoDB_useCallerCredentials": testAccDataSource_DynamoDB_useCallerCredentials, - "HTTP_endpoint": testAccDataSource_HTTP_endpoint, - "type": testAccDataSource_type, - "Type_dynamoDB": testAccDataSource_Type_dynamoDB, - "Type_http": testAccDataSource_Type_http, - "Type_http_auth": testAccDataSource_Type_httpAuth, - "Type_lambda": testAccDataSource_Type_lambda, - "Type_none": testAccDataSource_Type_none, - "Type_rdbms": testAccDataSource_Type_relationalDatabase, - "Type_rdbms_options": testAccDataSource_Type_relationalDatabaseWithOptions, - "Type_eventBridge": testAccDataSource_Type_eventBridge, - }, - "GraphQLAPI": { - acctest.CtBasic: testAccGraphQLAPI_basic, - acctest.CtDisappears: testAccGraphQLAPI_disappears, - "tags": testAccGraphQLAPI_tags, - "schema": testAccGraphQLAPI_schema, - "apiType": testAccGraphQLAPI_apiType, - "authenticationType": testAccGraphQLAPI_authenticationType, - "AuthenticationType_apiKey": testAccGraphQLAPI_AuthenticationType_apiKey, - "AuthenticationType_awsIAM": testAccGraphQLAPI_AuthenticationType_iam, - "AuthenticationType_amazonCognitoUserPools": testAccGraphQLAPI_AuthenticationType_amazonCognitoUserPools, - "AuthenticationType_openIDConnect": testAccGraphQLAPI_AuthenticationType_openIDConnect, - 
"AuthenticationType_awsLambda": testAccGraphQLAPI_AuthenticationType_lambda, - "enhancedMetricsConfig": testAccGraphQLAPI_enhancedMetricsConfig, - "log": testAccGraphQLAPI_log, - "Log_fieldLogLevel": testAccGraphQLAPI_Log_fieldLogLevel, - "Log_excludeVerboseContent": testAccGraphQLAPI_Log_excludeVerboseContent, - "OpenIDConnect_authTTL": testAccGraphQLAPI_OpenIDConnect_authTTL, - "OpenIDConnect_clientID": testAccGraphQLAPI_OpenIDConnect_clientID, - "OpenIDConnect_iatTTL": testAccGraphQLAPI_OpenIDConnect_iatTTL, - "OpenIDConnect_issuer": testAccGraphQLAPI_OpenIDConnect_issuer, - acctest.CtName: testAccGraphQLAPI_name, - "UserPool_awsRegion": testAccGraphQLAPI_UserPool_region, - "UserPool_defaultAction": testAccGraphQLAPI_UserPool_defaultAction, - "LambdaAuthorizerConfig_authorizerUri": testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerURI, - "LambdaAuthorizerConfig_identityValidationExpression": testAccGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression, - "LambdaAuthorizerConfig_authorizerResultTtlInSeconds": testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds, - "AdditionalAuthentication_apiKey": testAccGraphQLAPI_AdditionalAuthentication_apiKey, - "AdditionalAuthentication_awsIAM": testAccGraphQLAPI_AdditionalAuthentication_iam, - "AdditionalAuthentication_cognitoUserPools": testAccGraphQLAPI_AdditionalAuthentication_cognitoUserPools, - "AdditionalAuthentication_openIDConnect": testAccGraphQLAPI_AdditionalAuthentication_openIDConnect, - "AdditionalAuthentication_awsLambda": testAccGraphQLAPI_AdditionalAuthentication_lambda, - "AdditionalAuthentication_multiple": testAccGraphQLAPI_AdditionalAuthentication_multiple, - "xrayEnabled": testAccGraphQLAPI_xrayEnabled, - "visibility": testAccGraphQLAPI_visibility, - "introspectionConfig": testAccGraphQLAPI_introspectionConfig, - "queryDepthLimit": testAccGraphQLAPI_queryDepthLimit, - "resolverCountLimit": testAccGraphQLAPI_resolverCountLimit, - }, "Function": { acctest.CtBasic: 
testAccFunction_basic, "code": testAccFunction_code, diff --git a/internal/service/appsync/datasource_test.go b/internal/service/appsync/datasource_test.go index 40b68fa510a6..cdee0bef6892 100644 --- a/internal/service/appsync/datasource_test.go +++ b/internal/service/appsync/datasource_test.go @@ -20,12 +20,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func testAccDataSource_basic(t *testing.T) { +func TestAccAppSyncDataSource_basic(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -57,12 +57,12 @@ func testAccDataSource_basic(t *testing.T) { }) } -func testAccDataSource_description(t *testing.T) { +func TestAccAppSyncDataSource_description(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -91,12 +91,12 @@ func testAccDataSource_description(t *testing.T) { }) } -func testAccDataSource_DynamoDB_region(t *testing.T) { +func TestAccAppSyncDataSource_DynamoDB_region(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -127,12 +127,12 @@ func testAccDataSource_DynamoDB_region(t *testing.T) { }) } -func testAccDataSource_DynamoDB_useCallerCredentials(t *testing.T) { +func TestAccAppSyncDataSource_DynamoDB_useCallerCredentials(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -237,12 +237,12 @@ func TestAccAppSyncDataSource_OpenSearchService_region(t *testing.T) { }) } -func testAccDataSource_HTTP_endpoint(t *testing.T) { +func TestAccAppSyncDataSource_HTTP_endpoint(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -275,12 +275,12 @@ func testAccDataSource_HTTP_endpoint(t *testing.T) { }) } -func testAccDataSource_type(t *testing.T) { +func TestAccAppSyncDataSource_type(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -304,14 +304,14 @@ func testAccDataSource_type(t *testing.T) { }) } -func testAccDataSource_Type_dynamoDB(t *testing.T) { +func TestAccAppSyncDataSource_Type_dynamoDB(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) dynamodbTableResourceName := "aws_dynamodb_table.test" iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -403,12 +403,12 @@ func TestAccAppSyncDataSource_Type_openSearchService(t *testing.T) { }) } -func testAccDataSource_Type_http(t *testing.T) { +func TestAccAppSyncDataSource_Type_http(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -432,12 +432,12 @@ func testAccDataSource_Type_http(t *testing.T) { }) } -func testAccDataSource_Type_httpAuth(t *testing.T) { +func TestAccAppSyncDataSource_Type_httpAuth(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -466,12 +466,12 @@ func testAccDataSource_Type_httpAuth(t *testing.T) { }) } -func testAccDataSource_Type_relationalDatabase(t *testing.T) { +func TestAccAppSyncDataSource_Type_relationalDatabase(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -495,12 +495,12 @@ func testAccDataSource_Type_relationalDatabase(t *testing.T) { }) } -func testAccDataSource_Type_relationalDatabaseWithOptions(t *testing.T) { +func TestAccAppSyncDataSource_Type_relationalDatabaseWithOptions(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -526,14 +526,14 @@ func testAccDataSource_Type_relationalDatabaseWithOptions(t *testing.T) { }) } -func testAccDataSource_Type_lambda(t *testing.T) { +func TestAccAppSyncDataSource_Type_lambda(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) iamRoleResourceName := "aws_iam_role.test" lambdaFunctionResourceName := "aws_lambda_function.test" resourceName := 
"aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -558,14 +558,14 @@ func testAccDataSource_Type_lambda(t *testing.T) { }) } -func testAccDataSource_Type_eventBridge(t *testing.T) { +func TestAccAppSyncDataSource_Type_eventBridge(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) iamRoleResourceName := "aws_iam_role.test" eventBusResourceName := "aws_cloudwatch_event_bus.test" resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -590,12 +590,12 @@ func testAccDataSource_Type_eventBridge(t *testing.T) { }) } -func testAccDataSource_Type_none(t *testing.T) { +func TestAccAppSyncDataSource_Type_none(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_datasource.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1223,16 +1223,10 @@ resource "aws_secretsmanager_secret_version" "test" { resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q engine = %[2]q - engine_mode = "serverless" database_name = "mydb" master_username = "foo" master_password = 
"mustbeeightcharaters" skip_final_snapshot = true - - scaling_configuration { - min_capacity = 1 - max_capacity = 2 - } } resource "aws_iam_role" "test" { diff --git a/internal/service/appsync/graphql_api_test.go b/internal/service/appsync/graphql_api_test.go index 95d15931806b..66273a3400b3 100644 --- a/internal/service/appsync/graphql_api_test.go +++ b/internal/service/appsync/graphql_api_test.go @@ -21,13 +21,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func testAccGraphQLAPI_basic(t *testing.T) { +func TestAccAppSyncGraphQLAPI_basic(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -64,13 +64,13 @@ func testAccGraphQLAPI_basic(t *testing.T) { }) } -func testAccGraphQLAPI_disappears(t *testing.T) { +func TestAccAppSyncGraphQLAPI_disappears(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -88,13 +88,13 @@ func testAccGraphQLAPI_disappears(t *testing.T) { }) } -func testAccGraphQLAPI_schema(t *testing.T) { +func TestAccAppSyncGraphQLAPI_schema(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -135,13 +135,13 @@ func testAccGraphQLAPI_schema(t *testing.T) { }) } -func testAccGraphQLAPI_authenticationType(t *testing.T) { +func TestAccAppSyncGraphQLAPI_authenticationType(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -170,13 +170,13 @@ func testAccGraphQLAPI_authenticationType(t *testing.T) { }) } -func testAccGraphQLAPI_AuthenticationType_apiKey(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AuthenticationType_apiKey(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -200,13 +200,13 @@ func testAccGraphQLAPI_AuthenticationType_apiKey(t *testing.T) { }) } -func testAccGraphQLAPI_AuthenticationType_iam(t *testing.T) { +func 
TestAccAppSyncGraphQLAPI_AuthenticationType_iam(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -230,14 +230,14 @@ func testAccGraphQLAPI_AuthenticationType_iam(t *testing.T) { }) } -func testAccGraphQLAPI_AuthenticationType_amazonCognitoUserPools(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AuthenticationType_amazonCognitoUserPools(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -263,13 +263,13 @@ func testAccGraphQLAPI_AuthenticationType_amazonCognitoUserPools(t *testing.T) { }) } -func testAccGraphQLAPI_AuthenticationType_openIDConnect(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AuthenticationType_openIDConnect(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: 
acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -293,14 +293,14 @@ func testAccGraphQLAPI_AuthenticationType_openIDConnect(t *testing.T) { }) } -func testAccGraphQLAPI_AuthenticationType_lambda(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AuthenticationType_lambda(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -326,13 +326,13 @@ func testAccGraphQLAPI_AuthenticationType_lambda(t *testing.T) { }) } -func testAccGraphQLAPI_enhancedMetricsConfig(t *testing.T) { +func TestAccAppSyncGraphQLAPI_enhancedMetricsConfig(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -372,14 +372,14 @@ func testAccGraphQLAPI_enhancedMetricsConfig(t *testing.T) { }) } -func testAccGraphQLAPI_log(t *testing.T) { +func TestAccAppSyncGraphQLAPI_log(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, 
resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -404,14 +404,14 @@ func testAccGraphQLAPI_log(t *testing.T) { }) } -func testAccGraphQLAPI_Log_fieldLogLevel(t *testing.T) { +func TestAccAppSyncGraphQLAPI_Log_fieldLogLevel(t *testing.T) { ctx := acctest.Context(t) var api1, api2, api3 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -456,14 +456,14 @@ func testAccGraphQLAPI_Log_fieldLogLevel(t *testing.T) { }) } -func testAccGraphQLAPI_Log_excludeVerboseContent(t *testing.T) { +func TestAccAppSyncGraphQLAPI_Log_excludeVerboseContent(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -499,13 +499,13 @@ func testAccGraphQLAPI_Log_excludeVerboseContent(t *testing.T) { }) } -func testAccGraphQLAPI_OpenIDConnect_authTTL(t *testing.T) { +func TestAccAppSyncGraphQLAPI_OpenIDConnect_authTTL(t 
*testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -540,13 +540,13 @@ func testAccGraphQLAPI_OpenIDConnect_authTTL(t *testing.T) { }) } -func testAccGraphQLAPI_OpenIDConnect_clientID(t *testing.T) { +func TestAccAppSyncGraphQLAPI_OpenIDConnect_clientID(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -581,13 +581,13 @@ func testAccGraphQLAPI_OpenIDConnect_clientID(t *testing.T) { }) } -func testAccGraphQLAPI_OpenIDConnect_iatTTL(t *testing.T) { +func TestAccAppSyncGraphQLAPI_OpenIDConnect_iatTTL(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -622,13 +622,13 @@ func testAccGraphQLAPI_OpenIDConnect_iatTTL(t *testing.T) { 
}) } -func testAccGraphQLAPI_OpenIDConnect_issuer(t *testing.T) { +func TestAccAppSyncGraphQLAPI_OpenIDConnect_issuer(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -661,14 +661,14 @@ func testAccGraphQLAPI_OpenIDConnect_issuer(t *testing.T) { }) } -func testAccGraphQLAPI_name(t *testing.T) { +func TestAccAppSyncGraphQLAPI_name(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -692,14 +692,14 @@ func testAccGraphQLAPI_name(t *testing.T) { }) } -func testAccGraphQLAPI_UserPool_region(t *testing.T) { +func TestAccAppSyncGraphQLAPI_UserPool_region(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: 
acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -736,14 +736,14 @@ func testAccGraphQLAPI_UserPool_region(t *testing.T) { }) } -func testAccGraphQLAPI_UserPool_defaultAction(t *testing.T) { +func TestAccAppSyncGraphQLAPI_UserPool_defaultAction(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -780,14 +780,14 @@ func testAccGraphQLAPI_UserPool_defaultAction(t *testing.T) { }) } -func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerURI(t *testing.T) { +func TestAccAppSyncGraphQLAPI_LambdaAuthorizerConfig_authorizerURI(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -820,14 +820,14 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerURI(t *testing.T) { }) } -func testAccGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression(t *testing.T) { +func TestAccAppSyncGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression(t *testing.T) { ctx := acctest.Context(t) var api1, 
api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -862,13 +862,13 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression(t *te }) } -func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds(t *testing.T) { +func TestAccAppSyncGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds(t *testing.T) { ctx := acctest.Context(t) var api1, api2, api3, api4 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -919,59 +919,13 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds(t *te }) } -func testAccGraphQLAPI_tags(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_apiKey(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: 
testAccCheckGraphQLAPIDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccGraphQLAPIConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckGraphQLAPIExists(ctx, resourceName, &api1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccGraphQLAPIConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGraphQLAPIExists(ctx, resourceName, &api1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccGraphQLAPIConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckGraphQLAPIExists(ctx, resourceName, &api1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - -func testAccGraphQLAPI_AdditionalAuthentication_apiKey(t *testing.T) { - ctx := acctest.Context(t) - var api1 awstypes.GraphqlApi - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_appsync_graphql_api.test" - - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1000,13 +954,13 @@ func 
testAccGraphQLAPI_AdditionalAuthentication_apiKey(t *testing.T) { }) } -func testAccGraphQLAPI_AdditionalAuthentication_iam(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_iam(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1035,14 +989,14 @@ func testAccGraphQLAPI_AdditionalAuthentication_iam(t *testing.T) { }) } -func testAccGraphQLAPI_AdditionalAuthentication_cognitoUserPools(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_cognitoUserPools(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1072,13 +1026,13 @@ func testAccGraphQLAPI_AdditionalAuthentication_cognitoUserPools(t *testing.T) { }) } -func testAccGraphQLAPI_AdditionalAuthentication_openIDConnect(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_openIDConnect(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + 
acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1108,14 +1062,14 @@ func testAccGraphQLAPI_AdditionalAuthentication_openIDConnect(t *testing.T) { }) } -func testAccGraphQLAPI_AdditionalAuthentication_lambda(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_lambda(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1147,7 +1101,7 @@ func testAccGraphQLAPI_AdditionalAuthentication_lambda(t *testing.T) { }) } -func testAccGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { +func TestAccAppSyncGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1155,7 +1109,7 @@ func testAccGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { lambdaAuthorizerResourceName := "aws_lambda_function.test" resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1199,13 +1153,13 @@ func 
testAccGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { }) } -func testAccGraphQLAPI_xrayEnabled(t *testing.T) { +func TestAccAppSyncGraphQLAPI_xrayEnabled(t *testing.T) { ctx := acctest.Context(t) var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1229,13 +1183,13 @@ func testAccGraphQLAPI_xrayEnabled(t *testing.T) { }) } -func testAccGraphQLAPI_visibility(t *testing.T) { +func TestAccAppSyncGraphQLAPI_visibility(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1258,13 +1212,13 @@ func testAccGraphQLAPI_visibility(t *testing.T) { }) } -func testAccGraphQLAPI_introspectionConfig(t *testing.T) { +func TestAccAppSyncGraphQLAPI_introspectionConfig(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -1286,13 +1240,13 @@ func testAccGraphQLAPI_introspectionConfig(t *testing.T) { }) } -func testAccGraphQLAPI_queryDepthLimit(t *testing.T) { +func TestAccAppSyncGraphQLAPI_queryDepthLimit(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1314,13 +1268,13 @@ func testAccGraphQLAPI_queryDepthLimit(t *testing.T) { }) } -func testAccGraphQLAPI_resolverCountLimit(t *testing.T) { +func TestAccAppSyncGraphQLAPI_resolverCountLimit(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1403,13 +1357,13 @@ func testAccCheckGraphQLAPITypeExists(ctx context.Context, n, typeName string) r } } -func testAccGraphQLAPI_apiType(t *testing.T) { +func TestAccAppSyncGraphQLAPI_apiType(t *testing.T) { ctx := acctest.Context(t) var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" - resource.Test(t, resource.TestCase{ + acctest.ParallelTest(ctx, t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, 
ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -1737,33 +1691,6 @@ resource "aws_appsync_graphql_api" "test" { `, rName) } -func testAccGraphQLAPIConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_appsync_graphql_api" "test" { - authentication_type = "API_KEY" - name = %[1]q - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccGraphQLAPIConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_appsync_graphql_api" "test" { - authentication_type = "API_KEY" - name = %[1]q - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccGraphQLAPIConfig_additionalAuthAuthType(rName, defaultAuthType, additionalAuthType string) string { return fmt.Sprintf(` resource "aws_appsync_graphql_api" "test" { diff --git a/internal/service/appsync/testdata/GraphQLAPI/tags/main_gen.tf b/internal/service/appsync/testdata/GraphQLAPI/tags/main_gen.tf index 6d1bf7d6933e..ae3385166516 100644 --- a/internal/service/appsync/testdata/GraphQLAPI/tags/main_gen.tf +++ b/internal/service/appsync/testdata/GraphQLAPI/tags/main_gen.tf @@ -4,7 +4,6 @@ resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" name = var.rName - visibility = var.rName tags = var.resource_tags } diff --git a/internal/service/appsync/testdata/GraphQLAPI/tagsComputed1/main_gen.tf b/internal/service/appsync/testdata/GraphQLAPI/tagsComputed1/main_gen.tf index 9f4abbf9099f..3a4b86a8e0f5 100644 --- a/internal/service/appsync/testdata/GraphQLAPI/tagsComputed1/main_gen.tf +++ b/internal/service/appsync/testdata/GraphQLAPI/tagsComputed1/main_gen.tf @@ -6,7 +6,6 @@ provider "null" {} resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" name = var.rName - visibility = var.rName tags = { (var.unknownTagKey) = null_resource.test.id 
diff --git a/internal/service/appsync/testdata/GraphQLAPI/tagsComputed2/main_gen.tf b/internal/service/appsync/testdata/GraphQLAPI/tagsComputed2/main_gen.tf index 8adb41deb270..0b47645945c3 100644 --- a/internal/service/appsync/testdata/GraphQLAPI/tagsComputed2/main_gen.tf +++ b/internal/service/appsync/testdata/GraphQLAPI/tagsComputed2/main_gen.tf @@ -6,7 +6,6 @@ provider "null" {} resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" name = var.rName - visibility = var.rName tags = { (var.unknownTagKey) = null_resource.test.id diff --git a/internal/service/appsync/testdata/GraphQLAPI/tags_defaults/main_gen.tf b/internal/service/appsync/testdata/GraphQLAPI/tags_defaults/main_gen.tf index ca619fa10da2..2dd99be8d6db 100644 --- a/internal/service/appsync/testdata/GraphQLAPI/tags_defaults/main_gen.tf +++ b/internal/service/appsync/testdata/GraphQLAPI/tags_defaults/main_gen.tf @@ -10,7 +10,6 @@ provider "aws" { resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" name = var.rName - visibility = var.rName tags = var.resource_tags } diff --git a/internal/service/appsync/testdata/GraphQLAPI/tags_ignore/main_gen.tf b/internal/service/appsync/testdata/GraphQLAPI/tags_ignore/main_gen.tf index e5fa339029a9..d177ccac4646 100644 --- a/internal/service/appsync/testdata/GraphQLAPI/tags_ignore/main_gen.tf +++ b/internal/service/appsync/testdata/GraphQLAPI/tags_ignore/main_gen.tf @@ -13,7 +13,6 @@ provider "aws" { resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" name = var.rName - visibility = var.rName tags = var.resource_tags } diff --git a/internal/service/appsync/testdata/tmpl/graphql_api_tags.gtpl b/internal/service/appsync/testdata/tmpl/graphql_api_tags.gtpl index ac83faa0b022..7576dfaf4246 100644 --- a/internal/service/appsync/testdata/tmpl/graphql_api_tags.gtpl +++ b/internal/service/appsync/testdata/tmpl/graphql_api_tags.gtpl @@ -2,7 +2,6 @@ resource "aws_appsync_graphql_api" "test" { {{- template 
"region" }} authentication_type = "API_KEY" name = var.rName - visibility = var.rName {{- template "tags" . }} } diff --git a/internal/service/batch/job_queue.go b/internal/service/batch/job_queue.go index 1f9d55913964..016528d222bb 100644 --- a/internal/service/batch/job_queue.go +++ b/internal/service/batch/job_queue.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "iter" + "slices" "time" "github.com/YakDriver/regexache" @@ -16,6 +18,10 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -23,6 +29,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -30,6 +37,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/provider/framework/listresource" + tfslices 
"github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -51,10 +60,18 @@ func newJobQueueResource(_ context.Context) (resource.ResourceWithConfigure, err return &r, nil } +// @FrameworkListResource("aws_batch_job_queue") +func jobQueueResourceAsListResource() list.ListResourceWithConfigure { + return &jobQueueResource{} +} + +var _ list.ListResource = &jobQueueResource{} + type jobQueueResource struct { framework.ResourceWithModel[jobQueueResourceModel] framework.WithTimeouts framework.WithImportByIdentity + framework.WithList } func (r *jobQueueResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { @@ -213,7 +230,6 @@ func (r *jobQueueResource) Read(ctx context.Context, request resource.ReadReques return } - // Set attributes for import. response.Diagnostics.Append(fwflex.Flatten(ctx, jobQueue, &data, fwflex.WithFieldNamePrefix("JobQueue"))...) if response.Diagnostics.HasError() { return @@ -369,11 +385,11 @@ func (r *jobQueueResource) UpgradeState(ctx context.Context) map[int64]resource. 
} func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awstypes.JobQueueDetail, error) { - input := &batch.DescribeJobQueuesInput{ + input := batch.DescribeJobQueuesInput{ JobQueues: []string{id}, } - output, err := findJobQueue(ctx, conn, input) + output, err := findJobQueue(ctx, conn, &input) if err != nil { return nil, err @@ -381,8 +397,7 @@ func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awst if status := output.Status; status == awstypes.JQStatusDeleted { return nil, &retry.NotFoundError{ - Message: string(status), - LastRequest: input, + Message: string(status), } } @@ -390,30 +405,7 @@ func findJobQueueByID(ctx context.Context, conn *batch.Client, id string) (*awst } func findJobQueue(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) (*awstypes.JobQueueDetail, error) { - output, err := findJobQueues(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSingleValueResult(output) -} - -func findJobQueues(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) ([]awstypes.JobQueueDetail, error) { - var output []awstypes.JobQueueDetail - - pages := batch.NewDescribeJobQueuesPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if err != nil { - return nil, err - } - - output = append(output, page.JobQueues...) - } - - return output, nil + return tfresource.AssertSingleValueResultIterErr(listJobQueues(ctx, conn, input)) } func statusJobQueue(ctx context.Context, conn *batch.Client, id string) retry.StateRefreshFunc { @@ -525,3 +517,152 @@ type jobStateTimeLimitActionModel struct { Reason types.String `tfsdk:"reason"` State fwtypes.StringEnum[awstypes.JobStateTimeLimitActionsState] `tfsdk:"state"` } + +// DescribeJobQueues is an "All-Or-Some" call. 
+func listJobQueues(ctx context.Context, conn *batch.Client, input *batch.DescribeJobQueuesInput) iter.Seq2[awstypes.JobQueueDetail, error] { + return func(yield func(awstypes.JobQueueDetail, error) bool) { + pages := batch.NewDescribeJobQueuesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + yield(awstypes.JobQueueDetail{}, fmt.Errorf("listing Batch Job Queues: %w", err)) + return + } + + for _, jobQueue := range page.JobQueues { + if !yield(jobQueue, nil) { + return + } + } + } + } +} + +func (r jobQueueResource) ListResourceConfigSchema(_ context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{}, + } +} + +func (r jobQueueResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + var query jobQueueListModel + + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + awsClient := r.Meta() + conn := awsClient.BatchClient(ctx) + + resultInterceptors := r.ResultInterceptors() + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + var input batch.DescribeJobQueuesInput + for jobQueue, err := range listJobQueues(ctx, conn, &input) { + if err != nil { + result = list.ListResult{ + Diagnostics: diag.Diagnostics{ + diag.NewErrorDiagnostic( + "Error Listing Remote Resources", + fmt.Sprintf("Error: %s", err), + ), + }, + } + yield(result) + return + } + + ctx = tftags.NewContext(ctx, awsClient.DefaultTagsConfig(ctx), awsClient.IgnoreTagsConfig(ctx)) + + var data jobQueueResourceModel + + timeoutsType, _ := result.Resource.Schema.TypeAtPath(ctx, path.Root(names.AttrTimeouts)) + obj, _ := newNullObject(timeoutsType) + data.Timeouts.Object = obj + + 
typ, _ := result.Resource.Schema.TypeAtPath(ctx, path.Root(names.AttrTags)) + tagsType := typ.(attr.TypeWithElementType) + data.Tags.MapValue = basetypes.NewMapNull(tagsType.ElementType()) + data.TagsAll.MapValue = basetypes.NewMapNull(tagsType.ElementType()) + + params := listresource.InterceptorParams{ + C: awsClient, + Result: &result, + } + + params.When = listresource.Before + for interceptor := range slices.Values(resultInterceptors) { + d := interceptor.Read(ctx, params) // nosemgrep:ci.semgrep.migrate.direct-CRUD-calls + result.Diagnostics.Append(d...) + if d.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + } + + if diags := fwflex.Flatten(ctx, jobQueue, &data, fwflex.WithFieldNamePrefix("JobQueue")); diags.HasError() { + result.Diagnostics.Append(diags...) + } + + setTagsOut(ctx, jobQueue.Tags) + + if diags := result.Resource.Set(ctx, &data); diags.HasError() { + result.Diagnostics.Append(diags...) + return + } + + result.DisplayName = data.JobQueueName.ValueString() + + params.When = listresource.After + for interceptor := range tfslices.BackwardValues(resultInterceptors) { + d := interceptor.Read(ctx, params) // nosemgrep:ci.semgrep.migrate.direct-CRUD-calls + result.Diagnostics.Append(d...) + if d.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + } + + if result.Diagnostics.HasError() { + result = list.ListResult{Diagnostics: result.Diagnostics} + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +type jobQueueListModel struct { + // TODO: factor out + Region types.String `tfsdk:"region"` +} + +func newNullObject(typ attr.Type) (obj basetypes.ObjectValue, diags diag.Diagnostics) { + i, ok := typ.(attr.TypeWithAttributeTypes) + if !ok { + diags.AddError( + "Internal Error", + "An unexpected error occurred. "+ + "This is always an error in the provider. 
"+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expected value type to implement attr.TypeWithAttributeTypes, got: %T", typ), + ) + return + } + + attrTypes := i.AttributeTypes() + + obj = basetypes.NewObjectNull(attrTypes) + + return obj, diags +} diff --git a/internal/service/batch/job_queue_list_test.go b/internal/service/batch/job_queue_list_test.go new file mode 100644 index 000000000000..f23b1c159ef3 --- /dev/null +++ b/internal/service/batch/job_queue_list_test.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package batch_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBatchJobQueue_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_batch_job_queue.test[0]" + resourceName2 := "aws_batch_job_queue.test[1]" + resourceName3 := "aws_batch_job_queue.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + Steps: 
[]resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-0"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-1"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNExact("batch", "job-queue/"+rName+"-2"), + }), + }, + }, + }, + }) +} + +func TestAccBatchJobQueue_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_batch_job_queue.test[0]" + resourceName2 := "aws_batch_job_queue.test[1]" + resourceName3 := "aws_batch_job_queue.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + 
TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchServiceID), + CheckDestroy: testAccCheckJobQueueDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/JobQueue/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-0"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-1"), + }), + querycheck.ExpectIdentity("aws_batch_job_queue.test", 
map[string]knownvalue.Check{ + names.AttrARN: tfknownvalue.RegionalARNAlternateRegionExact("batch", "job-queue/"+rName+"-2"), + }), + }, + }, + }, + }) +} diff --git a/internal/service/batch/service_package_gen.go b/internal/service/batch/service_package_gen.go index 471e6d418208..a924a1186d8c 100644 --- a/internal/service/batch/service_package_gen.go +++ b/internal/service/batch/service_package_gen.go @@ -4,6 +4,8 @@ package batch import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" @@ -47,6 +49,21 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser } } +func (p *servicePackage) FrameworkListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageFrameworkListResource] { + return slices.Values([]*inttypes.ServicePackageFrameworkListResource{ + { + Factory: jobQueueResourceAsListResource, + TypeName: "aws_batch_job_queue", + Name: "Job Queue", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity(inttypes.WithIdentityDuplicateAttrs(names.AttrID)), + }, + }) +} + func (p *servicePackage) SDKDataSources(ctx context.Context) []*inttypes.ServicePackageSDKDataSource { return []*inttypes.ServicePackageSDKDataSource{ { diff --git a/internal/service/batch/sweep.go b/internal/service/batch/sweep.go index 226168855c27..23527525410f 100644 --- a/internal/service/batch/sweep.go +++ b/internal/service/batch/sweep.go @@ -217,30 +217,24 @@ func sweepJobQueues(region string) error { if err != nil { return fmt.Errorf("getting client: %w", err) } - input := &batch.DescribeJobQueuesInput{} + input := batch.DescribeJobQueuesInput{} conn := client.BatchClient(ctx) - sweepResources := make([]sweep.Sweepable, 0) - - pages := batch.NewDescribeJobQueuesPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if awsv2.SkipSweepError(err) { - 
log.Printf("[WARN] Skipping Batch Job Queue sweep for %s: %s", region, err) - return nil - } + var sweepResources []sweep.Sweepable + for jobQueue, err := range listJobQueues(ctx, conn, &input) { if err != nil { + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Batch Job Queue sweep for %s: %s", region, err) + return nil + } return fmt.Errorf("error listing Batch Job Queues (%s): %w", region, err) } - for _, v := range page.JobQueues { - id := aws.ToString(v.JobQueueArn) + id := aws.ToString(jobQueue.JobQueueArn) - sweepResources = append(sweepResources, framework.NewSweepResource(newJobQueueResource, client, - framework.NewAttribute(names.AttrID, id), - )) - } + sweepResources = append(sweepResources, framework.NewSweepResource(newJobQueueResource, client, + framework.NewAttribute(names.AttrID, id), + )) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/batch/testdata/JobQueue/basic/main_gen.tf b/internal/service/batch/testdata/JobQueue/basic/main_gen.tf index 408255fe6423..e289fa7dee63 100644 --- a/internal/service/batch/testdata/JobQueue/basic/main_gen.tf +++ b/internal/service/batch/testdata/JobQueue/basic/main_gen.tf @@ -57,7 +57,7 @@ resource "aws_iam_role" "ecs_instance" { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": "ec2.${data.aws_partition.current.dns_suffix}" } } ] diff --git a/internal/service/batch/testdata/JobQueue/basic_v6.0.0/main_gen.tf b/internal/service/batch/testdata/JobQueue/basic_v6.0.0/main_gen.tf index 0f1c2a9eb72b..09504e10df97 100644 --- a/internal/service/batch/testdata/JobQueue/basic_v6.0.0/main_gen.tf +++ b/internal/service/batch/testdata/JobQueue/basic_v6.0.0/main_gen.tf @@ -57,7 +57,7 @@ resource "aws_iam_role" "ecs_instance" { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": 
"ec2.${data.aws_partition.current.dns_suffix}" } } ] diff --git a/internal/service/batch/testdata/JobQueue/data.tags/main_gen.tf b/internal/service/batch/testdata/JobQueue/data.tags/main_gen.tf index 2af86694eb16..1c9d17191c9f 100644 --- a/internal/service/batch/testdata/JobQueue/data.tags/main_gen.tf +++ b/internal/service/batch/testdata/JobQueue/data.tags/main_gen.tf @@ -64,7 +64,7 @@ resource "aws_iam_role" "ecs_instance" { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": "ec2.${data.aws_partition.current.dns_suffix}" } } ] diff --git a/internal/service/batch/testdata/JobQueue/data.tags_defaults/main_gen.tf b/internal/service/batch/testdata/JobQueue/data.tags_defaults/main_gen.tf index 197154fbc343..7f7853283bd3 100644 --- a/internal/service/batch/testdata/JobQueue/data.tags_defaults/main_gen.tf +++ b/internal/service/batch/testdata/JobQueue/data.tags_defaults/main_gen.tf @@ -70,7 +70,7 @@ resource "aws_iam_role" "ecs_instance" { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": "ec2.${data.aws_partition.current.dns_suffix}" } } ] diff --git a/internal/service/batch/testdata/JobQueue/data.tags_ignore/main_gen.tf b/internal/service/batch/testdata/JobQueue/data.tags_ignore/main_gen.tf index 4b816a8c6547..4ea7e309d765 100644 --- a/internal/service/batch/testdata/JobQueue/data.tags_ignore/main_gen.tf +++ b/internal/service/batch/testdata/JobQueue/data.tags_ignore/main_gen.tf @@ -73,7 +73,7 @@ resource "aws_iam_role" "ecs_instance" { "Action": "sts:AssumeRole", "Effect": "Allow", "Principal": { - "Service": "ec2.${data.aws_partition.current.dns_suffix}" + "Service": "ec2.${data.aws_partition.current.dns_suffix}" } } ] diff --git a/internal/service/batch/testdata/JobQueue/list_basic/main.tfquery.hcl b/internal/service/batch/testdata/JobQueue/list_basic/main.tfquery.hcl new file mode 100644 index 
000000000000..c5c83d720e01 --- /dev/null +++ b/internal/service/batch/testdata/JobQueue/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_batch_job_queue" "test" { + provider = aws +} diff --git a/internal/service/batch/testdata/JobQueue/list_basic/main_gen.tf b/internal/service/batch/testdata/JobQueue/list_basic/main_gen.tf new file mode 100644 index 000000000000..ee816f4fe443 --- /dev/null +++ b/internal/service/batch/testdata/JobQueue/list_basic/main_gen.tf @@ -0,0 +1,86 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_batch_job_queue" "test" { + count = 3 + + name = "${var.rName}-${count.index}" + priority = 1 + state = "DISABLED" + + compute_environment_order { + compute_environment = aws_batch_compute_environment.test.arn + order = 1 + } +} + +resource "aws_batch_compute_environment" "test" { + name = var.rName + service_role = aws_iam_role.batch_service.arn + type = "UNMANAGED" + + depends_on = [aws_iam_role_policy_attachment.batch_service] +} + +data "aws_partition" "current" {} + +resource "aws_iam_role" "batch_service" { + name = "${var.rName}-batch-service" + + assume_role_policy = <= progressInterval { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Invalidation %s is currently '%s', continuing to wait for completion...", invalidationID, currentStatus), + }) + lastProgressUpdate = time.Now() + } + + // Check if we've reached completion + if aws.ToString(output.Invalidation.Status) == "Completed" { + return nil + } + + // Check if we're in an unexpected state + validStatuses := []string{ + "InProgress", + } + if !slices.Contains(validStatuses, currentStatus) && currentStatus != "Completed" { + return fmt.Errorf("invalidation entered unexpected status: %s", currentStatus) + } + + // Wait before next poll + time.Sleep(pollInterval) + } +} diff --git 
a/internal/service/cloudfront/create_invalidation_action_test.go b/internal/service/cloudfront/create_invalidation_action_test.go new file mode 100644 index 000000000000..db37d7f7c46e --- /dev/null +++ b/internal/service/cloudfront/create_invalidation_action_test.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudfront_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/cloudfront" + awstypes "github.com/aws/aws-sdk-go-v2/service/cloudfront/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCloudFrontCreateInvalidationAction_basic(t *testing.T) { + ctx := acctest.Context(t) + var distribution awstypes.Distribution + resourceName := "aws_cloudfront_distribution.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: testAccCheckDistributionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCreateInvalidationActionConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDistributionExists(ctx, resourceName, &distribution), + testAccCheckInvalidationExists(ctx, &distribution, []string{"/*"}), + ), + }, + }, + }) +} + +// Helper: Check invalidation exists and is 
completed +func testAccCheckInvalidationExists(ctx context.Context, distribution *awstypes.Distribution, expectedPaths []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if distribution == nil || distribution.Id == nil { + return fmt.Errorf("Distribution is nil or has no ID") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFrontClient(ctx) + + // List invalidations for this distribution + listInput := &cloudfront.ListInvalidationsInput{ + DistributionId: distribution.Id, + } + out, err := conn.ListInvalidations(ctx, listInput) + if err != nil { + return fmt.Errorf("failed to list invalidations: %w", err) + } + + if len(out.InvalidationList.Items) == 0 { + return fmt.Errorf("no invalidations found for distribution %s", *distribution.Id) + } + + // Get the most recent invalidation + latest := out.InvalidationList.Items[0] + + // Get invalidation details + getInput := &cloudfront.GetInvalidationInput{ + DistributionId: distribution.Id, + Id: latest.Id, + } + getOut, err := conn.GetInvalidation(ctx, getInput) + if err != nil { + return fmt.Errorf("failed to get invalidation %s: %w", *latest.Id, err) + } + + invalidation := getOut.Invalidation + + // Check that the invalidation contains the expected paths + if invalidation.InvalidationBatch == nil || invalidation.InvalidationBatch.Paths == nil { + return fmt.Errorf("invalidation batch or paths is nil") + } + + actualPaths := invalidation.InvalidationBatch.Paths.Items + if len(actualPaths) != len(expectedPaths) { + return fmt.Errorf("expected %d paths, got %d", len(expectedPaths), len(actualPaths)) + } + + // Create a map for easy lookup + pathMap := make(map[string]bool) + for _, path := range actualPaths { + pathMap[path] = true + } + + // Check each expected path exists + for _, expectedPath := range expectedPaths { + if !pathMap[expectedPath] { + return fmt.Errorf("expected path %s not found in invalidation", expectedPath) + } + } + + // Wait for invalidation to complete 
(with timeout) + maxAttempts := 60 // 10 minutes at 10-second intervals + for attempt := range maxAttempts { + statusInput := &cloudfront.GetInvalidationInput{ + DistributionId: distribution.Id, + Id: latest.Id, + } + statusOut, err := conn.GetInvalidation(ctx, statusInput) + if err != nil { + return fmt.Errorf("failed to check invalidation status: %w", err) + } + + if *statusOut.Invalidation.Status == "Completed" { + return nil + } + + if attempt < maxAttempts-1 { + time.Sleep(10 * time.Second) + } + } + + return fmt.Errorf("invalidation %s did not complete within timeout", *latest.Id) + } +} + +// Terraform configuration with action trigger +func testAccCreateInvalidationActionConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudfront_distribution" "test" { + # Use faster settings for testing + enabled = true + wait_for_deployment = false + + default_cache_behavior { + allowed_methods = ["GET", "HEAD"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "test" + viewer_protocol_policy = "allow-all" + + forwarded_values { + query_string = false + + cookies { + forward = "none" + } + } + + min_ttl = 0 + default_ttl = 0 + max_ttl = 0 + } + + origin { + domain_name = "www.example.com" + origin_id = "test" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "https-only" + origin_ssl_protocols = ["TLSv1.2"] + } + } + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } + + tags = { + Name = %[1]q + } +} + +action "aws_cloudfront_create_invalidation" "test" { + config { + distribution_id = aws_cloudfront_distribution.test.id + paths = ["/*"] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_cloudfront_create_invalidation.test] + } + } +} +`, rName) +} diff --git 
a/internal/service/cloudfront/function_test.go b/internal/service/cloudfront/function_test.go index 2bfd28ee204b..6e5685f669e8 100644 --- a/internal/service/cloudfront/function_test.go +++ b/internal/service/cloudfront/function_test.go @@ -27,6 +27,7 @@ func init() { func testAccErrorCheckSkipFunction(t *testing.T) resource.ErrorCheckFunc { return acctest.ErrorCheckSkipMessagesContaining(t, "InvalidParameterValueException: Unsupported source arn", + "AccessDenied", ) } diff --git a/internal/service/cloudfront/service_package_gen.go b/internal/service/cloudfront/service_package_gen.go index cc54f5f4075f..1db1f6705adc 100644 --- a/internal/service/cloudfront/service_package_gen.go +++ b/internal/service/cloudfront/service_package_gen.go @@ -17,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newCreateInvalidationAction, + TypeName: "aws_cloudfront_create_invalidation", + Name: "Create Invalidation", + Region: unique.Make(inttypes.ResourceRegionDisabled()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ { diff --git a/internal/service/ec2/ec2_instance.go b/internal/service/ec2/ec2_instance.go index 120200b94c9c..2f3ad92fa9ff 100644 --- a/internal/service/ec2/ec2_instance.go +++ b/internal/service/ec2/ec2_instance.go @@ -10,6 +10,7 @@ import ( "encoding/hex" "errors" "fmt" + "iter" "log" "maps" "slices" @@ -23,6 +24,11 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" + frameworkdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + 
"github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -35,8 +41,11 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -1063,6 +1072,14 @@ func throughputDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool return strings.ToLower(v) != string(awstypes.VolumeTypeGp3) && new == "0" } +// @SDKListResource("aws_instance") +func instanceResourceAsListResource() itypes.ListResourceForSDK { + l := instanceListResource{} + l.SetResourceSchema(resourceInstance()) + + return &l +} + func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).EC2Client(ctx) @@ -1216,578 +1233,209 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta an return append(diags, resourceInstanceUpdate(ctx, d, meta)...) 
} -func resourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceInstanceRead(ctx context.Context, rd *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics c := meta.(*conns.AWSClient) conn := c.EC2Client(ctx) - instance, err := findInstanceByID(ctx, conn, d.Id()) + instance, err := findInstanceByID(ctx, conn, rd.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] EC2 Instance %s not found, removing from state", d.Id()) - d.SetId("") + if !rd.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] EC2 Instance %s not found, removing from state", rd.Id()) + rd.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) } - instanceType := string(instance.InstanceType) - instanceTypeInfo, err := findInstanceTypeByName(ctx, conn, instanceType) + diags = append(diags, resourceInstanceFlatten(ctx, c, instance, rd)...) 
- if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance Type (%s): %s", instanceType, err) - } + return diags +} - d.Set("instance_state", instance.State.Name) +func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EC2Client(ctx) - if v := instance.Placement; v != nil { - d.Set(names.AttrAvailabilityZone, v.AvailabilityZone) - d.Set("host_id", v.HostId) - if v := v.HostResourceGroupArn; v != nil { - d.Set("host_resource_group_arn", instance.Placement.HostResourceGroupArn) + if d.HasChange("volume_tags") && !d.IsNewResource() { + volIDs, err := getInstanceVolIDs(ctx, conn, d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) } - d.Set("placement_group", v.GroupName) - d.Set("placement_group_id", v.GroupId) - d.Set("placement_partition_number", v.PartitionNumber) - d.Set("tenancy", v.Tenancy) - } - - if err := d.Set("cpu_options", flattenCPUOptions(instance.CpuOptions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting cpu_options: %s", err) - } - - if v := instance.HibernationOptions; v != nil { - d.Set("hibernation", v.Configured) - } - if err := d.Set("enclave_options", flattenEnclaveOptions(instance.EnclaveOptions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting enclave_options: %s", err) - } + o, n := d.GetChange("volume_tags") - if instance.MaintenanceOptions != nil { - if err := d.Set("maintenance_options", []any{flattenInstanceMaintenanceOptions(instance.MaintenanceOptions)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting maintenance_options: %s", err) + for _, volID := range volIDs { + if err := updateTags(ctx, conn, volID, o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating volume_tags (%s): %s", volID, err) + } } - } else { - d.Set("maintenance_options", nil) } - if err := d.Set("metadata_options", 
flattenInstanceMetadataOptions(instance.MetadataOptions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting metadata_options: %s", err) - } - - if instance.PrivateDnsNameOptions != nil { - if err := d.Set("private_dns_name_options", []any{flattenPrivateDNSNameOptionsResponse(instance.PrivateDnsNameOptions)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting private_dns_name_options: %s", err) + if d.HasChange("iam_instance_profile") && !d.IsNewResource() { + input := ec2.DescribeIamInstanceProfileAssociationsInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String("instance-id"), + Values: []string{d.Id()}, + }, + }, } - } else { - d.Set("private_dns_name_options", nil) - } - - d.Set("ami", instance.ImageId) - d.Set(names.AttrInstanceType, instanceType) - d.Set("key_name", instance.KeyName) - d.Set("public_dns", instance.PublicDnsName) - d.Set("public_ip", instance.PublicIpAddress) - d.Set("private_dns", instance.PrivateDnsName) - d.Set("private_ip", instance.PrivateIpAddress) - d.Set("outpost_arn", instance.OutpostArn) - - if instance.IamInstanceProfile != nil && instance.IamInstanceProfile.Arn != nil { - name, err := instanceProfileARNToName(aws.ToString(instance.IamInstanceProfile.Arn)) + resp, err := conn.DescribeIamInstanceProfileAssociations(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting iam_instance_profile: %s", err) + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) } - d.Set("iam_instance_profile", name) - } else { - d.Set("iam_instance_profile", nil) - } - - { - launchTemplate, err := flattenInstanceLaunchTemplate(ctx, conn, d.Id(), d.Get("launch_template.0.version").(string)) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) launch template: %s", d.Id(), err) - } + // An Iam Instance Profile has been provided and is pending a change + // This means it is an association or a replacement to an association + if _, ok := 
d.GetOk("iam_instance_profile"); ok { + // Does not have an Iam Instance Profile associated with it, need to associate + if len(resp.IamInstanceProfileAssociations) == 0 { + if err := associateInstanceProfile(ctx, d, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + } else { + // Has an Iam Instance Profile associated with it, need to replace the association + associationId := resp.IamInstanceProfileAssociations[0].AssociationId + input := ec2.ReplaceIamInstanceProfileAssociationInput{ + AssociationId: associationId, + IamInstanceProfile: &awstypes.IamInstanceProfileSpecification{ + Name: aws.String(d.Get("iam_instance_profile").(string)), + }, + } - if err := d.Set(names.AttrLaunchTemplate, launchTemplate); err != nil { - return sdkdiag.AppendErrorf(diags, "setting launch_template: %s", err) - } - } + // If the instance is running, we can replace the instance profile association. + // If it is stopped, the association must be removed and the new one attached separately. (GH-8262) + instanceState := awstypes.InstanceStateName(d.Get("instance_state").(string)) - // Set configured Network Interface Device Index Slice - // We only want to read, and populate state for the configured network_interface attachments. Otherwise, other - // resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy - // diff. We should only read on changes configured for this specific resource because of this. 
- var configuredDeviceIndexes []int - if v, ok := d.GetOk("network_interface"); ok { - vL := v.(*schema.Set).List() - for _, vi := range vL { - mVi := vi.(map[string]any) - configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int)) - } - } + if instanceState != "" { + if instanceState == awstypes.InstanceStateNameStopped || instanceState == awstypes.InstanceStateNameStopping || instanceState == awstypes.InstanceStateNameShuttingDown { + if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + if err := associateInstanceProfile(ctx, d, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } + } else { + err := tfresource.Retry(ctx, iamPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { + _, err := conn.ReplaceIamInstanceProfileAssociation(ctx, &input) + if err != nil { + if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { + return tfresource.RetryableError(err) + } + return tfresource.NonRetryableError(err) + } + return nil + }) - var secondaryPrivateIPs []string - var ipv6Addresses []string - if len(instance.NetworkInterfaces) > 0 { - var primaryNetworkInterface awstypes.InstanceNetworkInterface - var networkInterfaces []map[string]any - for _, iNi := range instance.NetworkInterfaces { - ni := make(map[string]any) - if aws.ToInt32(iNi.Attachment.DeviceIndex) == 0 { - primaryNetworkInterface = iNi - } - // If the attached network device is inside our configuration, refresh state with values found. - // Otherwise, assume the network device was attached via an outside resource. 
- for _, index := range configuredDeviceIndexes { - if index == int(aws.ToInt32(iNi.Attachment.DeviceIndex)) { - ni[names.AttrDeleteOnTermination] = aws.ToBool(iNi.Attachment.DeleteOnTermination) - ni["device_index"] = aws.ToInt32(iNi.Attachment.DeviceIndex) - ni["network_card_index"] = aws.ToInt32(iNi.Attachment.NetworkCardIndex) - ni[names.AttrNetworkInterfaceID] = aws.ToString(iNi.NetworkInterfaceId) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): replacing instance profile: %s", d.Id(), err) + } + } } } - // Don't add empty network interfaces to schema - if len(ni) == 0 { - continue + // An Iam Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal + } else { + if len(resp.IamInstanceProfileAssociations) > 0 { + // Has an Iam Instance Profile associated with it, need to remove the association + associationId := resp.IamInstanceProfileAssociations[0].AssociationId + if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) + } } - networkInterfaces = append(networkInterfaces, ni) } - if err := d.Set("network_interface", networkInterfaces); err != nil { - return sdkdiag.AppendErrorf(diags, "setting network_interfaces: %v", err) + + if _, err := waitInstanceIAMInstanceProfileUpdated(ctx, conn, d.Id(), d.Get("iam_instance_profile").(string)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EC2 Instance (%s) IAM Instance Profile update: %s", d.Id(), err) } + } - // Set primary network interface details - // If an instance is shutting down, network interfaces are detached, and attributes may be nil, - // need to protect against nil pointer dereferences - if primaryNetworkInterface.NetworkInterfaceId != nil { - pni := map[string]any{ - names.AttrNetworkInterfaceID: aws.ToString(primaryNetworkInterface.NetworkInterfaceId), - names.AttrDeleteOnTermination: 
aws.ToBool(primaryNetworkInterface.Attachment.DeleteOnTermination), - } - if err := d.Set("primary_network_interface", []any{pni}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting primary_network_interface for AWS Instance (%s): %s", d.Id(), err) - } + // SourceDestCheck can only be modified on an instance without manually specified network interfaces. + // SourceDestCheck, in that case, is configured at the network interface level + if _, ok := d.GetOk("network_interface"); !ok { + // If we have a new resource and source_dest_check is still true, don't modify + sourceDestCheck := d.Get("source_dest_check").(bool) - d.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId) - if primaryNetworkInterface.SubnetId != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check - d.Set(names.AttrSubnetID, primaryNetworkInterface.SubnetId) - } - d.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses)) - if primaryNetworkInterface.SourceDestCheck != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check - d.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck) + // Because we're calling Update prior to Read, and the default value of `source_dest_check` is `true`, + // HasChange() thinks there is a diff between what is set on the instance and what is set in state. We need to ensure that + // if a diff has occurred, it's not because it's a new instance. 
+ if d.HasChange("source_dest_check") && !d.IsNewResource() || d.IsNewResource() && !sourceDestCheck { + input := ec2.ModifyInstanceAttributeInput{ + InstanceId: aws.String(d.Id()), + SourceDestCheck: &awstypes.AttributeBooleanValue{ + Value: aws.Bool(sourceDestCheck), + }, } - d.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil) + _, err := conn.ModifyInstanceAttribute(ctx, &input) - for _, address := range primaryNetworkInterface.PrivateIpAddresses { - if !aws.ToBool(address.Primary) { - secondaryPrivateIPs = append(secondaryPrivateIPs, aws.ToString(address.PrivateIpAddress)) - } + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) SourceDestCheck attribute: %s", d.Id(), err) } + } + } - for _, address := range primaryNetworkInterface.Ipv6Addresses { - ipv6Addresses = append(ipv6Addresses, aws.ToString(address.Ipv6Address)) - } + if d.HasChange("enable_primary_ipv6") && !d.IsNewResource() { + instance, err := FindInstanceByID(ctx, conn, d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) + } - if len(primaryNetworkInterface.Ipv6Addresses) > 0 { - if err := d.Set("enable_primary_ipv6", primaryNetworkInterface.Ipv6Addresses[0].IsPrimaryIpv6); err != nil { - return sdkdiag.AppendErrorf(diags, "setting enable_primary_ipv6: %s", err) - } + var primaryInterface *awstypes.InstanceNetworkInterface + for _, ni := range instance.NetworkInterfaces { + if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { + primaryInterface = &ni } } - } else { - d.Set("associate_public_ip_address", instance.PublicIpAddress != nil) - d.Set("ipv6_address_count", 0) - d.Set("primary_network_interface_id", "") - d.Set(names.AttrSubnetID, instance.SubnetId) - } - if err := d.Set("secondary_private_ips", secondaryPrivateIPs); err != nil { - return sdkdiag.AppendErrorf(diags, "setting private_ips for AWS Instance (%s): %s", d.Id(), err) - } + if primaryInterface == nil { + return 
sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s), enable_primary_ipv6, which does not contain a primary network interface", d.Id()) + } - if err := d.Set("ipv6_addresses", ipv6Addresses); err != nil { - log.Printf("[WARN] Error setting ipv6_addresses for AWS Instance (%s): %s", d.Id(), err) - } + enablePrimaryIpv6 := d.Get("enable_primary_ipv6").(bool) - d.Set("ebs_optimized", instance.EbsOptimized) - if aws.ToString(instance.SubnetId) != "" { - d.Set("source_dest_check", instance.SourceDestCheck) - } + input := ec2.ModifyNetworkInterfaceAttributeInput{ + NetworkInterfaceId: primaryInterface.NetworkInterfaceId, + EnablePrimaryIpv6: aws.Bool(enablePrimaryIpv6), + } - if instance.Monitoring != nil && instance.Monitoring.State != "" { - monitoringState := instance.Monitoring.State - d.Set("monitoring", monitoringState == awstypes.MonitoringStateEnabled || monitoringState == awstypes.MonitoringStatePending) + _, err = conn.ModifyNetworkInterfaceAttribute(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) primary network interface: %s", d.Id(), err) + } } - setTagsOut(ctx, instance.Tags) - - if _, ok := d.GetOk("volume_tags"); ok && !blockDeviceTagsDefined(d) { - volumeTags, err := readVolumeTags(ctx, conn, d.Id()) + if d.HasChange("ipv6_address_count") && !d.IsNewResource() { + instance, err := findInstanceByID(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) } - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) - tags := keyValueTags(ctx, volumeTags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + var primaryInterface awstypes.InstanceNetworkInterface + for _, ni := range instance.NetworkInterfaces { + if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { + primaryInterface = ni + } + } - if err := d.Set("volume_tags", tags.ResolveDuplicates(ctx, defaultTagsConfig, 
ignoreTagsConfig, d, "volume_tags", nil).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting volume_tags: %s", err) + if primaryInterface.NetworkInterfaceId == nil { + return sdkdiag.AppendErrorf(diags, "Failed to update ipv6_address_count on %q, which does not contain a primary network interface", d.Id()) } - } - if err := readSecurityGroups(ctx, d, instance, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } + o, n := d.GetChange("ipv6_address_count") + os, ns := o.(int), n.(int) - // Retrieve instance shutdown behavior - if err := readInstanceShutdownBehavior(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } + if ns > os { + // Add more to the primary NIC. + input := ec2.AssignIpv6AddressesInput{ + NetworkInterfaceId: primaryInterface.NetworkInterfaceId, + Ipv6AddressCount: aws.Int32(int32(ns - os)), + } - if err := readBlockDevices(ctx, d, meta, instance, false); err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - if _, ok := d.GetOk("ephemeral_block_device"); !ok { - d.Set("ephemeral_block_device", []any{}) - } - - // ARN - - d.Set(names.AttrARN, instanceARN(ctx, c, d.Id())) - - // Instance attributes - { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameDisableApiStop, - InstanceId: aws.String(d.Id()), - } - attr, err := conn.DescribeInstanceAttribute(ctx, &input) - if err != nil && !errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiStop, err) - } - if !errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition(ctx), err) { - d.Set("disable_api_stop", attr.DisableApiStop.Value) - } - } - { - if isSnowballEdgeInstance(d.Id()) { - log.Printf("[INFO] Determined deploying to 
Snowball Edge based off Instance ID %s. Skip setting the 'disable_api_termination' attribute.", d.Id()) - } else { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameDisableApiTermination, - InstanceId: aws.String(d.Id()), - } - output, err := conn.DescribeInstanceAttribute(ctx, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiTermination, err) - } - - d.Set("disable_api_termination", output.DisableApiTermination.Value) - } - } - { - input := ec2.DescribeInstanceAttributeInput{ - Attribute: awstypes.InstanceAttributeNameUserData, - InstanceId: aws.String(d.Id()), - } - attr, err := conn.DescribeInstanceAttribute(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameUserData, err) - } - if attr.UserData != nil && attr.UserData.Value != nil { - // Since user_data and user_data_base64 conflict with each other, - // we'll only set one or the other here to avoid a perma-diff. - // Since user_data_base64 was added later, we'll prefer to set - // user_data. - _, b64 := d.GetOk("user_data_base64") - if b64 { - d.Set("user_data_base64", attr.UserData.Value) - } else { - data, err := itypes.Base64Decode(aws.ToString(attr.UserData.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "decoding user_data: %s", err) - } - d.Set("user_data", string(data)) - } - } - } - - // AWS Standard will return InstanceCreditSpecification.NotSupported errors for EC2 Instance IDs outside T2 and T3 instance types - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8055 - if aws.ToBool(instanceTypeInfo.BurstablePerformanceSupported) { - instanceCreditSpecification, err := findInstanceCreditSpecificationByID(ctx, conn, d.Id()) - - // Ignore UnsupportedOperation errors for AWS China and GovCloud (US). 
- // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/4362. - if tfawserr.ErrCodeEquals(err, errCodeUnsupportedOperation) { - err = nil - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) credit specification: %s", d.Id(), err) - } - - if instanceCreditSpecification != nil { - if err := d.Set("credit_specification", []any{flattenInstanceCreditSpecification(instanceCreditSpecification)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting credit_specification: %s", err) - } - } else { - d.Set("credit_specification", nil) - } - } - - if d.Get("get_password_data").(bool) { - passwordData, err := getInstancePasswordData(ctx, aws.ToString(instance.InstanceId), conn, d.Timeout(schema.TimeoutRead)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - d.Set("password_data", passwordData) - } else { - d.Set("get_password_data", false) - d.Set("password_data", nil) - } - - if instance.CapacityReservationSpecification != nil { - if err := d.Set("capacity_reservation_specification", []any{flattenCapacityReservationSpecificationResponse(instance.CapacityReservationSpecification)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting capacity_reservation_specification: %s", err) - } - } else { - d.Set("capacity_reservation_specification", nil) - } - - if spotInstanceRequestID := aws.ToString(instance.SpotInstanceRequestId); spotInstanceRequestID != "" && instance.InstanceLifecycle != "" { - d.Set("instance_lifecycle", instance.InstanceLifecycle) - d.Set("spot_instance_request_id", spotInstanceRequestID) - - input := ec2.DescribeSpotInstanceRequestsInput{ - SpotInstanceRequestIds: []string{spotInstanceRequestID}, - } - - apiObject, err := findSpotInstanceRequest(ctx, conn, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Spot Instance Request (%s): %s", spotInstanceRequestID, err) - } - - tfMap := map[string]any{ - 
"instance_interruption_behavior": apiObject.InstanceInterruptionBehavior, - "spot_instance_type": apiObject.Type, - } - - if v := apiObject.SpotPrice; v != nil { - tfMap["max_price"] = aws.ToString(v) - } - - if v := apiObject.ValidUntil; v != nil { - tfMap["valid_until"] = aws.ToTime(v).Format(time.RFC3339) - } - - if err := d.Set("instance_market_options", []any{map[string]any{ - "market_type": awstypes.MarketTypeSpot, - "spot_options": []any{tfMap}, - }}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting instance_market_options: %s", err) - } - } else { - d.Set("instance_lifecycle", nil) - d.Set("instance_market_options", nil) - d.Set("spot_instance_request_id", nil) - } - - return diags -} - -func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Client(ctx) - - if d.HasChange("volume_tags") && !d.IsNewResource() { - volIDs, err := getInstanceVolIDs(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - - o, n := d.GetChange("volume_tags") - - for _, volID := range volIDs { - if err := updateTags(ctx, conn, volID, o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating volume_tags (%s): %s", volID, err) - } - } - } - - if d.HasChange("iam_instance_profile") && !d.IsNewResource() { - input := ec2.DescribeIamInstanceProfileAssociationsInput{ - Filters: []awstypes.Filter{ - { - Name: aws.String("instance-id"), - Values: []string{d.Id()}, - }, - }, - } - - resp, err := conn.DescribeIamInstanceProfileAssociations(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - - // An Iam Instance Profile has been provided and is pending a change - // This means it is an association or a replacement to an association - if _, ok := d.GetOk("iam_instance_profile"); ok { - // Does not have an Iam Instance 
Profile associated with it, need to associate - if len(resp.IamInstanceProfileAssociations) == 0 { - if err := associateInstanceProfile(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } else { - // Has an Iam Instance Profile associated with it, need to replace the association - associationId := resp.IamInstanceProfileAssociations[0].AssociationId - input := ec2.ReplaceIamInstanceProfileAssociationInput{ - AssociationId: associationId, - IamInstanceProfile: &awstypes.IamInstanceProfileSpecification{ - Name: aws.String(d.Get("iam_instance_profile").(string)), - }, - } - - // If the instance is running, we can replace the instance profile association. - // If it is stopped, the association must be removed and the new one attached separately. (GH-8262) - instanceState := awstypes.InstanceStateName(d.Get("instance_state").(string)) - - if instanceState != "" { - if instanceState == awstypes.InstanceStateNameStopped || instanceState == awstypes.InstanceStateNameStopping || instanceState == awstypes.InstanceStateNameShuttingDown { - if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - if err := associateInstanceProfile(ctx, d, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } else { - err := tfresource.Retry(ctx, iamPropagationTimeout, func(ctx context.Context) *tfresource.RetryError { - _, err := conn.ReplaceIamInstanceProfileAssociation(ctx, &input) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { - return tfresource.RetryableError(err) - } - return tfresource.NonRetryableError(err) - } - return nil - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): replacing instance profile: %s", d.Id(), err) - } - } - } - } - // An Iam 
Instance Profile has _not_ been provided but is pending a change. This means there is a pending removal - } else { - if len(resp.IamInstanceProfileAssociations) > 0 { - // Has an Iam Instance Profile associated with it, need to remove the association - associationId := resp.IamInstanceProfileAssociations[0].AssociationId - if err := disassociateInstanceProfile(ctx, associationId, conn); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EC2 Instance (%s): %s", d.Id(), err) - } - } - } - - if _, err := waitInstanceIAMInstanceProfileUpdated(ctx, conn, d.Id(), d.Get("iam_instance_profile").(string)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EC2 Instance (%s) IAM Instance Profile update: %s", d.Id(), err) - } - } - - // SourceDestCheck can only be modified on an instance without manually specified network interfaces. - // SourceDestCheck, in that case, is configured at the network interface level - if _, ok := d.GetOk("network_interface"); !ok { - // If we have a new resource and source_dest_check is still true, don't modify - sourceDestCheck := d.Get("source_dest_check").(bool) - - // Because we're calling Update prior to Read, and the default value of `source_dest_check` is `true`, - // HasChange() thinks there is a diff between what is set on the instance and what is set in state. We need to ensure that - // if a diff has occurred, it's not because it's a new instance. 
- if d.HasChange("source_dest_check") && !d.IsNewResource() || d.IsNewResource() && !sourceDestCheck { - input := ec2.ModifyInstanceAttributeInput{ - InstanceId: aws.String(d.Id()), - SourceDestCheck: &awstypes.AttributeBooleanValue{ - Value: aws.Bool(sourceDestCheck), - }, - } - - _, err := conn.ModifyInstanceAttribute(ctx, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) SourceDestCheck attribute: %s", d.Id(), err) - } - } - } - - if d.HasChange("enable_primary_ipv6") && !d.IsNewResource() { - instance, err := FindInstanceByID(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - var primaryInterface *awstypes.InstanceNetworkInterface - for _, ni := range instance.NetworkInterfaces { - if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { - primaryInterface = &ni - } - } - - if primaryInterface == nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s), enable_primary_ipv6, which does not contain a primary network interface", d.Id()) - } - - enablePrimaryIpv6 := d.Get("enable_primary_ipv6").(bool) - - input := ec2.ModifyNetworkInterfaceAttributeInput{ - NetworkInterfaceId: primaryInterface.NetworkInterfaceId, - EnablePrimaryIpv6: aws.Bool(enablePrimaryIpv6), - } - - _, err = conn.ModifyNetworkInterfaceAttribute(ctx, &input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying EC2 Instance (%s) primary network interface: %s", d.Id(), err) - } - } - - if d.HasChange("ipv6_address_count") && !d.IsNewResource() { - instance, err := findInstanceByID(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", d.Id(), err) - } - - var primaryInterface awstypes.InstanceNetworkInterface - for _, ni := range instance.NetworkInterfaces { - if aws.ToInt32(ni.Attachment.DeviceIndex) == 0 { - primaryInterface = ni - } - } - - if primaryInterface.NetworkInterfaceId == nil { - return 
sdkdiag.AppendErrorf(diags, "Failed to update ipv6_address_count on %q, which does not contain a primary network interface", d.Id()) - } - - o, n := d.GetChange("ipv6_address_count") - os, ns := o.(int), n.(int) - - if ns > os { - // Add more to the primary NIC. - input := ec2.AssignIpv6AddressesInput{ - NetworkInterfaceId: primaryInterface.NetworkInterfaceId, - Ipv6AddressCount: aws.Int32(int32(ns - os)), - } - - _, err := conn.AssignIpv6Addresses(ctx, &input) + _, err := conn.AssignIpv6Addresses(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "assigning EC2 Instance (%s) IPv6 addresses: %s", d.Id(), err) @@ -2375,7 +2023,7 @@ func modifyInstanceAttributeWithStopStart(ctx context.Context, conn *ec2.Client, return nil } -func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta any, instance *awstypes.Instance, ds bool) error { +func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta *conns.AWSClient, instance *awstypes.Instance, ds bool) error { ibds, err := readBlockDevicesFromInstance(ctx, d, meta, instance, ds) if err != nil { return fmt.Errorf("reading block devices: %w", err) @@ -2425,7 +2073,7 @@ func readBlockDevices(ctx context.Context, d *schema.ResourceData, meta any, ins return nil } -func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, meta any, instance *awstypes.Instance, ds bool) (map[string]any, error) { +func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, meta *conns.AWSClient, instance *awstypes.Instance, ds bool) (map[string]any, error) { blockDevices := make(map[string]any) blockDevices["ebs"] = make([]map[string]any, 0) blockDevices["root"] = nil @@ -2449,7 +2097,7 @@ func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, m // Need to call DescribeVolumes to get volume_size and volume_type for each // EBS block device - conn := meta.(*conns.AWSClient).EC2Client(ctx) + conn := meta.EC2Client(ctx) input := 
ec2.DescribeVolumesInput{ VolumeIds: volIDs, } @@ -2458,8 +2106,8 @@ func readBlockDevicesFromInstance(ctx context.Context, d *schema.ResourceData, m return nil, err } - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig(ctx) + defaultTagsConfig := meta.DefaultTagsConfig(ctx) + ignoreTagsConfig := meta.IgnoreTagsConfig(ctx) for _, vol := range volResp.Volumes { instanceBd := instanceBlockDevices[aws.ToString(vol.VolumeId)] @@ -3282,6 +2930,77 @@ func buildInstanceOpts(ctx context.Context, d *schema.ResourceData, meta any) (* return opts, nil } +func findInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.Instance, error) { + input := ec2.DescribeInstancesInput{ + InstanceIds: []string{id}, + } + + output, err := findInstance(ctx, conn, &input) + + if err != nil { + return nil, err + } + + if state := output.State.Name; state == awstypes.InstanceStateNameTerminated { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: &input, + } + } + + // Eventual consistency check. + if aws.ToString(output.InstanceId) != id { + return nil, &retry.NotFoundError{ + LastRequest: &input, + } + } + + return output, nil +} + +func findInstance(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) (*awstypes.Instance, error) { + var output []awstypes.Instance + for v, err := range listInstances(ctx, conn, input) { + if err != nil { + return nil, err + } + output = append(output, v) + } + + return tfresource.AssertSingleValueResult(output, func(v *awstypes.Instance) bool { return v.State != nil }) +} + +// DescribeInstances is an "All-Or-Some" call. 
+func listInstances(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) iter.Seq2[awstypes.Instance, error] { + return func(yield func(awstypes.Instance, error) bool) { + pages := ec2.NewDescribeInstancesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { + yield(awstypes.Instance{}, &retry.NotFoundError{ + LastError: err, + LastRequest: &input, + }) + return + } + + if err != nil { + yield(awstypes.Instance{}, err) + return + } + + for _, v := range page.Reservations { + for _, instance := range v.Instances { + if !yield(instance, nil) { + return + } + } + } + } + } +} + // startInstance starts an EC2 instance and waits for the instance to start. func startInstance(ctx context.Context, conn *ec2.Client, id string, retry bool, timeout time.Duration) error { var err error @@ -3455,123 +3174,501 @@ func waitInstanceStarted(ctx context.Context, conn *ec2.Client, id string, timeo tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) } - return output, err + return output, err + } + + return nil, err +} + +func waitInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.InstanceStateNamePending, + awstypes.InstanceStateNameRunning, + awstypes.InstanceStateNameShuttingDown, + awstypes.InstanceStateNameStopping, + ), + Target: enum.Slice(awstypes.InstanceStateNameStopped), + Refresh: statusInstance(ctx, conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Instance); ok { + if stateReason := output.StateReason; stateReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + } + + return output, err + } + + return nil, 
err +} + +func userDataHashSum(userData string) string { + // Check whether the user_data is not Base64 encoded. + // Always calculate hash of base64 decoded value since we + // check against double-encoding when setting it. + v, err := itypes.Base64Decode(userData) + if err != nil { + v = []byte(userData) + } + + hash := sha1.Sum(v) + return hex.EncodeToString(hash[:]) +} + +func getInstanceVolIDs(ctx context.Context, conn *ec2.Client, instanceId string) ([]string, error) { + volIDs := []string{} + + input := ec2.DescribeVolumesInput{ + Filters: newAttributeFilterList(map[string]string{ + "attachment.instance-id": instanceId, + }), + } + resp, err := conn.DescribeVolumes(ctx, &input) + if err != nil { + return nil, fmt.Errorf("getting volumes: %w", err) + } + + for _, v := range resp.Volumes { + volIDs = append(volIDs, aws.ToString(v.VolumeId)) + } + + return volIDs, nil +} + +func getRootVolID(instance *awstypes.Instance) string { + volID := "" + for _, bd := range instance.BlockDeviceMappings { + if bd.Ebs != nil && blockDeviceIsRoot(bd, instance) { + if bd.Ebs.VolumeId != nil { + volID = aws.ToString(bd.Ebs.VolumeId) + } + break + } + } + + return volID +} + +func getVolIDByDeviceName(instance *awstypes.Instance, deviceName string) string { + volID := "" + for _, bd := range instance.BlockDeviceMappings { + if aws.ToString(bd.DeviceName) == deviceName { + if bd.Ebs != nil { + volID = aws.ToString(bd.Ebs.VolumeId) + break + } + } + } + + return volID +} + +func blockDeviceTagsDefined(d *schema.ResourceData) bool { + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]any) + for _, v := range vL { + bd := v.(map[string]any) + if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { + return true + } + } + } + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]any) + if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && 
len(blockDeviceTags) > 0 { + return true + } + } + } + + return false +} + +func resourceInstanceFlatten(ctx context.Context, client *conns.AWSClient, instance *awstypes.Instance, rd *schema.ResourceData) diag.Diagnostics { + var diags diag.Diagnostics + + conn := client.EC2Client(ctx) + + instanceType := string(instance.InstanceType) + instanceTypeInfo, err := findInstanceTypeByName(ctx, conn, instanceType) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance Type (%s): %s", instanceType, err) + } + + rd.Set("instance_state", instance.State.Name) + + if v := instance.Placement; v != nil { + rd.Set(names.AttrAvailabilityZone, v.AvailabilityZone) + rd.Set("host_id", v.HostId) + if v := v.HostResourceGroupArn; v != nil { + rd.Set("host_resource_group_arn", instance.Placement.HostResourceGroupArn) + } + rd.Set("placement_group", v.GroupName) + rd.Set("placement_group_id", v.GroupId) + rd.Set("placement_partition_number", v.PartitionNumber) + rd.Set("tenancy", v.Tenancy) + } + + if err := rd.Set("cpu_options", flattenCPUOptions(instance.CpuOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting cpu_options: %s", err) + } + + if v := instance.HibernationOptions; v != nil { + rd.Set("hibernation", v.Configured) + } + + if err := rd.Set("enclave_options", flattenEnclaveOptions(instance.EnclaveOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting enclave_options: %s", err) + } + + if instance.MaintenanceOptions != nil { + if err := rd.Set("maintenance_options", []any{flattenInstanceMaintenanceOptions(instance.MaintenanceOptions)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting maintenance_options: %s", err) + } + } else { + rd.Set("maintenance_options", nil) + } + + if err := rd.Set("metadata_options", flattenInstanceMetadataOptions(instance.MetadataOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting metadata_options: %s", err) + } + + if instance.PrivateDnsNameOptions != nil { + if err 
:= rd.Set("private_dns_name_options", []any{flattenPrivateDNSNameOptionsResponse(instance.PrivateDnsNameOptions)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting private_dns_name_options: %s", err) + } + } else { + rd.Set("private_dns_name_options", nil) + } + + rd.Set("ami", instance.ImageId) + rd.Set(names.AttrInstanceType, instanceType) + rd.Set("key_name", instance.KeyName) + rd.Set("public_dns", instance.PublicDnsName) + rd.Set("public_ip", instance.PublicIpAddress) + rd.Set("private_dns", instance.PrivateDnsName) + rd.Set("private_ip", instance.PrivateIpAddress) + rd.Set("outpost_arn", instance.OutpostArn) + + if instance.IamInstanceProfile != nil && instance.IamInstanceProfile.Arn != nil { + name, err := instanceProfileARNToName(aws.ToString(instance.IamInstanceProfile.Arn)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting iam_instance_profile: %s", err) + } + + rd.Set("iam_instance_profile", name) + } else { + rd.Set("iam_instance_profile", nil) + } + + { + launchTemplate, err := flattenInstanceLaunchTemplate(ctx, conn, rd.Id(), rd.Get("launch_template.0.version").(string)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) launch template: %s", rd.Id(), err) + } + + if err := rd.Set(names.AttrLaunchTemplate, launchTemplate); err != nil { + return sdkdiag.AppendErrorf(diags, "setting launch_template: %s", err) + } + } + + // Set configured Network Interface Device Index Slice + // We only want to read, and populate state for the configured network_interface attachments. Otherwise, other + // resources have the potential to attach network interfaces to the instance, and cause a perpetual create/destroy + // diff. We should only read on changes configured for this specific resource because of this. 
+ var configuredDeviceIndexes []int + if v, ok := rd.GetOk("network_interface"); ok { + vL := v.(*schema.Set).List() + for _, vi := range vL { + mVi := vi.(map[string]any) + configuredDeviceIndexes = append(configuredDeviceIndexes, mVi["device_index"].(int)) + } + } + + var secondaryPrivateIPs []string + var ipv6Addresses []string + if len(instance.NetworkInterfaces) > 0 { + var primaryNetworkInterface awstypes.InstanceNetworkInterface + var networkInterfaces []map[string]any + for _, iNi := range instance.NetworkInterfaces { + ni := make(map[string]any) + if aws.ToInt32(iNi.Attachment.DeviceIndex) == 0 { + primaryNetworkInterface = iNi + } + // If the attached network device is inside our configuration, refresh state with values found. + // Otherwise, assume the network device was attached via an outside resource. + for _, index := range configuredDeviceIndexes { + if index == int(aws.ToInt32(iNi.Attachment.DeviceIndex)) { + ni[names.AttrDeleteOnTermination] = aws.ToBool(iNi.Attachment.DeleteOnTermination) + ni["device_index"] = aws.ToInt32(iNi.Attachment.DeviceIndex) + ni["network_card_index"] = aws.ToInt32(iNi.Attachment.NetworkCardIndex) + ni[names.AttrNetworkInterfaceID] = aws.ToString(iNi.NetworkInterfaceId) + } + } + // Don't add empty network interfaces to schema + if len(ni) == 0 { + continue + } + networkInterfaces = append(networkInterfaces, ni) + } + if err := rd.Set("network_interface", networkInterfaces); err != nil { + return sdkdiag.AppendErrorf(diags, "setting network_interfaces: %v", err) + } + + // Set primary network interface details + // If an instance is shutting down, network interfaces are detached, and attributes may be nil, + // need to protect against nil pointer dereferences + if primaryNetworkInterface.NetworkInterfaceId != nil { + pni := map[string]any{ + names.AttrNetworkInterfaceID: aws.ToString(primaryNetworkInterface.NetworkInterfaceId), + names.AttrDeleteOnTermination: 
aws.ToBool(primaryNetworkInterface.Attachment.DeleteOnTermination), + } + if err := rd.Set("primary_network_interface", []any{pni}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting primary_network_interface for AWS Instance (%s): %s", rd.Id(), err) + } + + rd.Set("primary_network_interface_id", primaryNetworkInterface.NetworkInterfaceId) + if primaryNetworkInterface.SubnetId != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check + rd.Set(names.AttrSubnetID, primaryNetworkInterface.SubnetId) + } + rd.Set("ipv6_address_count", len(primaryNetworkInterface.Ipv6Addresses)) + if primaryNetworkInterface.SourceDestCheck != nil { // nosemgrep: ci.helper-schema-ResourceData-Set-extraneous-nil-check + rd.Set("source_dest_check", primaryNetworkInterface.SourceDestCheck) + } + + rd.Set("associate_public_ip_address", primaryNetworkInterface.Association != nil) + + for _, address := range primaryNetworkInterface.PrivateIpAddresses { + if !aws.ToBool(address.Primary) { + secondaryPrivateIPs = append(secondaryPrivateIPs, aws.ToString(address.PrivateIpAddress)) + } + } + + for _, address := range primaryNetworkInterface.Ipv6Addresses { + ipv6Addresses = append(ipv6Addresses, aws.ToString(address.Ipv6Address)) + } + + if len(primaryNetworkInterface.Ipv6Addresses) > 0 { + if err := rd.Set("enable_primary_ipv6", primaryNetworkInterface.Ipv6Addresses[0].IsPrimaryIpv6); err != nil { + return sdkdiag.AppendErrorf(diags, "setting enable_primary_ipv6: %s", err) + } + } + } + } else { + rd.Set("associate_public_ip_address", instance.PublicIpAddress != nil) + rd.Set("ipv6_address_count", 0) + rd.Set("primary_network_interface_id", "") + rd.Set(names.AttrSubnetID, instance.SubnetId) + } + + if err := rd.Set("secondary_private_ips", secondaryPrivateIPs); err != nil { + return sdkdiag.AppendErrorf(diags, "setting private_ips for AWS Instance (%s): %s", rd.Id(), err) + } + + if err := rd.Set("ipv6_addresses", ipv6Addresses); err != nil { + log.Printf("[WARN] 
Error setting ipv6_addresses for AWS Instance (%s): %s", rd.Id(), err) } - return nil, err -} + rd.Set("ebs_optimized", instance.EbsOptimized) + if aws.ToString(instance.SubnetId) != "" { + rd.Set("source_dest_check", instance.SourceDestCheck) + } -func waitInstanceStopped(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.Instance, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - awstypes.InstanceStateNamePending, - awstypes.InstanceStateNameRunning, - awstypes.InstanceStateNameShuttingDown, - awstypes.InstanceStateNameStopping, - ), - Target: enum.Slice(awstypes.InstanceStateNameStopped), - Refresh: statusInstance(ctx, conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + if instance.Monitoring != nil && instance.Monitoring.State != "" { + monitoringState := instance.Monitoring.State + rd.Set("monitoring", monitoringState == awstypes.MonitoringStateEnabled || monitoringState == awstypes.MonitoringStatePending) } - outputRaw, err := stateConf.WaitForStateContext(ctx) + setTagsOut(ctx, instance.Tags) + if _, ok := rd.GetOk("volume_tags"); ok && !blockDeviceTagsDefined(rd) { + volumeTags, err := readVolumeTags(ctx, conn, rd.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } - if output, ok := outputRaw.(*awstypes.Instance); ok { - if stateReason := output.StateReason; stateReason != nil { - tfresource.SetLastError(err, errors.New(aws.ToString(stateReason.Message))) + defaultTagsConfig := client.DefaultTagsConfig(ctx) + ignoreTagsConfig := client.IgnoreTagsConfig(ctx) + tags := keyValueTags(ctx, volumeTags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + if err := rd.Set("volume_tags", tags.ResolveDuplicates(ctx, defaultTagsConfig, ignoreTagsConfig, rd, "volume_tags", nil).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting volume_tags: %s", err) } + } - return output, err + if err := 
readSecurityGroups(ctx, rd, instance, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) } - return nil, err -} + // Retrieve instance shutdown behavior + if err := readInstanceShutdownBehavior(ctx, rd, conn); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } -func userDataHashSum(userData string) string { - // Check whether the user_data is not Base64 encoded. - // Always calculate hash of base64 decoded value since we - // check against double-encoding when setting it. - v, err := itypes.Base64Decode(userData) - if err != nil { - v = []byte(userData) + if err := readBlockDevices(ctx, rd, client, instance, false); err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) } - hash := sha1.Sum(v) - return hex.EncodeToString(hash[:]) -} + if _, ok := rd.GetOk("ephemeral_block_device"); !ok { + rd.Set("ephemeral_block_device", []any{}) + } -func getInstanceVolIDs(ctx context.Context, conn *ec2.Client, instanceId string) ([]string, error) { - volIDs := []string{} + // ARN - input := ec2.DescribeVolumesInput{ - Filters: newAttributeFilterList(map[string]string{ - "attachment.instance-id": instanceId, - }), - } - resp, err := conn.DescribeVolumes(ctx, &input) - if err != nil { - return nil, fmt.Errorf("getting volumes: %w", err) - } + rd.Set(names.AttrARN, instanceARN(ctx, client, rd.Id())) - for _, v := range resp.Volumes { - volIDs = append(volIDs, aws.ToString(v.VolumeId)) + // Instance attributes + { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameDisableApiStop, + InstanceId: aws.String(rd.Id()), + } + attr, err := conn.DescribeInstanceAttribute(ctx, &input) + if err != nil && !errs.IsUnsupportedOperationInPartitionError(client.Partition(ctx), err) { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiStop, err) + } + if 
!errs.IsUnsupportedOperationInPartitionError(client.Partition(ctx), err) { + rd.Set("disable_api_stop", attr.DisableApiStop.Value) + } } + { + if isSnowballEdgeInstance(rd.Id()) { + log.Printf("[INFO] Determined deploying to Snowball Edge based off Instance ID %s. Skip setting the 'disable_api_termination' attribute.", rd.Id()) + } else { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameDisableApiTermination, + InstanceId: aws.String(rd.Id()), + } + output, err := conn.DescribeInstanceAttribute(ctx, &input) - return volIDs, nil -} + if err != nil { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameDisableApiTermination, err) + } -func getRootVolID(instance *awstypes.Instance) string { - volID := "" - for _, bd := range instance.BlockDeviceMappings { - if bd.Ebs != nil && blockDeviceIsRoot(bd, instance) { - if bd.Ebs.VolumeId != nil { - volID = aws.ToString(bd.Ebs.VolumeId) + rd.Set("disable_api_termination", output.DisableApiTermination.Value) + } + } + { + input := ec2.DescribeInstanceAttributeInput{ + Attribute: awstypes.InstanceAttributeNameUserData, + InstanceId: aws.String(rd.Id()), + } + attr, err := conn.DescribeInstanceAttribute(ctx, &input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "getting attribute (%s): %s", awstypes.InstanceAttributeNameUserData, err) + } + if attr.UserData != nil && attr.UserData.Value != nil { + // Since user_data and user_data_base64 conflict with each other, + // we'll only set one or the other here to avoid a perma-diff. + // Since user_data_base64 was added later, we'll prefer to set + // user_data. 
+ _, b64 := rd.GetOk("user_data_base64") + if b64 { + rd.Set("user_data_base64", attr.UserData.Value) + } else { + data, err := itypes.Base64Decode(aws.ToString(attr.UserData.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "decoding user_data: %s", err) + } + rd.Set("user_data", string(data)) } - break } } - return volID -} + // AWS Standard will return InstanceCreditSpecification.NotSupported errors for EC2 Instance IDs outside T2 and T3 instance types + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8055 + if aws.ToBool(instanceTypeInfo.BurstablePerformanceSupported) { + instanceCreditSpecification, err := findInstanceCreditSpecificationByID(ctx, conn, rd.Id()) -func getVolIDByDeviceName(instance *awstypes.Instance, deviceName string) string { - volID := "" - for _, bd := range instance.BlockDeviceMappings { - if aws.ToString(bd.DeviceName) == deviceName { - if bd.Ebs != nil { - volID = aws.ToString(bd.Ebs.VolumeId) - break + // Ignore UnsupportedOperation errors for AWS China and GovCloud (US). + // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/4362. 
+ if tfawserr.ErrCodeEquals(err, errCodeUnsupportedOperation) { + err = nil + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s) credit specification: %s", rd.Id(), err) + } + + if instanceCreditSpecification != nil { + if err := rd.Set("credit_specification", []any{flattenInstanceCreditSpecification(instanceCreditSpecification)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting credit_specification: %s", err) } + } else { + rd.Set("credit_specification", nil) } } - return volID -} + if rd.Get("get_password_data").(bool) { + passwordData, err := getInstancePasswordData(ctx, aws.ToString(instance.InstanceId), conn, rd.Timeout(schema.TimeoutRead)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instance (%s): %s", rd.Id(), err) + } + rd.Set("password_data", passwordData) + } else { + rd.Set("get_password_data", false) + rd.Set("password_data", nil) + } -func blockDeviceTagsDefined(d *schema.ResourceData) bool { - if v, ok := d.GetOk("root_block_device"); ok { - vL := v.([]any) - for _, v := range vL { - bd := v.(map[string]any) - if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { - return true - } + if instance.CapacityReservationSpecification != nil { + if err := rd.Set("capacity_reservation_specification", []any{flattenCapacityReservationSpecificationResponse(instance.CapacityReservationSpecification)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting capacity_reservation_specification: %s", err) } + } else { + rd.Set("capacity_reservation_specification", nil) } - if v, ok := d.GetOk("ebs_block_device"); ok { - vL := v.(*schema.Set).List() - for _, v := range vL { - bd := v.(map[string]any) - if blockDeviceTags, ok := bd[names.AttrTags].(map[string]any); ok && len(blockDeviceTags) > 0 { - return true - } + if spotInstanceRequestID := aws.ToString(instance.SpotInstanceRequestId); spotInstanceRequestID != "" && instance.InstanceLifecycle != "" { + 
rd.Set("instance_lifecycle", instance.InstanceLifecycle) + rd.Set("spot_instance_request_id", spotInstanceRequestID) + + input := ec2.DescribeSpotInstanceRequestsInput{ + SpotInstanceRequestIds: []string{spotInstanceRequestID}, + } + + apiObject, err := findSpotInstanceRequest(ctx, conn, &input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Spot Instance Request (%s): %s", spotInstanceRequestID, err) + } + + tfMap := map[string]any{ + "instance_interruption_behavior": apiObject.InstanceInterruptionBehavior, + "spot_instance_type": apiObject.Type, + } + + if v := apiObject.SpotPrice; v != nil { + tfMap["max_price"] = aws.ToString(v) + } + + if v := apiObject.ValidUntil; v != nil { + tfMap["valid_until"] = aws.ToTime(v).Format(time.RFC3339) + } + + if err := rd.Set("instance_market_options", []any{map[string]any{ + "market_type": awstypes.MarketTypeSpot, + "spot_options": []any{tfMap}, + }}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting instance_market_options: %s", err) } + } else { + rd.Set("instance_lifecycle", nil) + rd.Set("instance_market_options", nil) + rd.Set("spot_instance_request_id", nil) } - return false + return diags } func expandInstanceMetadataOptions(l []any) *awstypes.InstanceMetadataOptionsRequest { @@ -4186,6 +4283,179 @@ func hasCommonElement(slice1 []awstypes.ArchitectureType, slice2 []awstypes.Arch } return false } + func instanceARN(ctx context.Context, c *conns.AWSClient, instanceID string) string { return c.RegionalARN(ctx, names.EC2, "instance/"+instanceID) } + +var _ list.ListResourceWithRawV5Schemas = &instanceListResource{} + +type instanceListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type instanceListResourceModel struct { + framework.WithRegionModel + Filters customListFilters `tfsdk:"filter"` + IncludeAutoScaled types.Bool `tfsdk:"include_auto_scaled"` +} + +// ListResourceConfigSchema defines the 
schema for the List configuration +// might be able to intercept or wrap this for simplicity +func (l *instanceListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{ + "include_auto_scaled": listschema.BoolAttribute{ + Description: "Whether to include instances that are part of an Auto Scaling group. Auto scaled instances are excluded by default.", + Optional: true, + }, + }, + Blocks: map[string]listschema.Block{ + names.AttrFilter: customListFiltersBlock(ctx), + }, + } +} + +func (l *instanceListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.EC2Client(ctx) + + var query instanceListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input ec2.DescribeInstancesInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + // If no instance-state filter is set, default to all states except terminated and shutting-down + if !slices.ContainsFunc(input.Filters, func(i awstypes.Filter) bool { + return aws.ToString(i.Name) == "instance-state-name" || aws.ToString(i.Name) == "instance-state-code" + }) { + states := enum.Slice(slices.DeleteFunc(enum.EnumValues[awstypes.InstanceStateName](), func(s awstypes.InstanceStateName) bool { + return s == awstypes.InstanceStateNameTerminated || s == awstypes.InstanceStateNameShuttingDown + })...) 
+ input.Filters = append(input.Filters, awstypes.Filter{ + Name: aws.String("instance-state-name"), + Values: states, + }) + } + + includeAutoScaled := query.IncludeAutoScaled.ValueBool() + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + + for output, err := range listInstances(ctx, conn, &input) { + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + tags := keyValueTags(ctx, output.Tags) + + if !includeAutoScaled { + // Exclude Auto Scaled Instances + if v, ok := tags["aws:autoscaling:groupName"]; ok && v.ValueString() != "" { + continue + } + } + + rd := l.ResourceData() + rd.SetId(aws.ToString(output.InstanceId)) + result.Diagnostics.Append(translateDiags(resourceInstanceFlatten(ctx, awsClient, &output, rd))...) + if result.Diagnostics.HasError() { + yield(result) + return + } + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + if v, ok := tags["Name"]; ok { + result.DisplayName = v.ValueString() + } else { + result.DisplayName = aws.ToString(output.InstanceId) + } + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +func translateDiags(in diag.Diagnostics) frameworkdiag.Diagnostics { + out := make(frameworkdiag.Diagnostics, len(in)) + for i, diagIn := range in { + var diagOut frameworkdiag.Diagnostic + if diagIn.Severity == diag.Error { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewErrorDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeErrorDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } else { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewWarningDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + 
diagOut = frameworkdiag.NewAttributeWarningDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } + out[i] = diagOut + } + return out +} + +func translatePath(in cty.Path) path.Path { + var out path.Path + + if len(in) == 0 { + return out + } + + step := in[0] + switch v := step.(type) { + case cty.GetAttrStep: + out = path.Root(v.Name) + } + + for i := 1; i < len(in); i++ { + step := in[i] + switch v := step.(type) { + case cty.GetAttrStep: + out = out.AtName(v.Name) + + case cty.IndexStep: + switch v.Key.Type() { + case cty.Number: + v, _ := v.Key.AsBigFloat().Int64() + out = out.AtListIndex(int(v)) + case cty.String: + out = out.AtMapKey(v.Key.AsString()) + } + } + } + + return out +} diff --git a/internal/service/ec2/ec2_instance_data_source.go b/internal/service/ec2/ec2_instance_data_source.go index de915bb8cedb..630e4ec6073d 100644 --- a/internal/service/ec2/ec2_instance_data_source.go +++ b/internal/service/ec2/ec2_instance_data_source.go @@ -555,7 +555,7 @@ func instanceDescriptionAttributes(ctx context.Context, d *schema.ResourceData, } // Block devices - if err := readBlockDevices(ctx, d, meta, instance, true); err != nil { + if err := readBlockDevices(ctx, d, meta.(*conns.AWSClient), instance, true); err != nil { return fmt.Errorf("reading EC2 Instance (%s): %w", aws.ToString(instance.InstanceId), err) } if _, ok := d.GetOk("ephemeral_block_device"); !ok { diff --git a/internal/service/ec2/ec2_instance_list_test.go b/internal/service/ec2/ec2_instance_list_test.go new file mode 100644 index 000000000000..3abb34281166 --- /dev/null +++ b/internal/service/ec2/ec2_instance_list_test.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEC2Instance_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_instance.test[0]" + resourceName2 := "aws_instance.test[1]" + resourceName3 := "aws_instance.test[2]" + + var id1, id2, id3 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_basic/"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.test.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.test.1", names.AttrID, getter(&id2)), + resource.TestCheckResourceAttrWith("aws_instance.test.2", names.AttrID, getter(&id3)), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_basic/"), + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.StringFunc(checker(&id1)), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.StringFunc(checker(&id2)), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.StringFunc(checker(&id3)), + }), + }, + }, + }, + }) +} + +func TestAccEC2Instance_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_instance.test[0]" + resourceName2 := "aws_instance.test[1]" + resourceName3 := "aws_instance.test[2]" + + var id1, id2, id3 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.test.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.test.1", names.AttrID, getter(&id2)), + resource.TestCheckResourceAttrWith("aws_instance.test.2", names.AttrID, getter(&id3)), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNAlternateRegionFormat(resourceName3, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_region_override/"), + ConfigVariables: config.Variables{ + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.StringFunc(checker(&id1)), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.StringFunc(checker(&id2)), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + 
names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrID: knownvalue.StringFunc(checker(&id3)), + }), + }, + }, + }, + }) +} + +func TestAccEC2Instance_List_Filtered(t *testing.T) { + ctx := acctest.Context(t) + + resourceNameExpected1 := "aws_instance.expected[0]" + resourceNameExpected2 := "aws_instance.expected[1]" + resourceNameNotExpected1 := "aws_instance.not_expected[0]" + resourceNameNotExpected2 := "aws_instance.not_expected[1]" + + var id1, id2 string + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_filtered/"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrWith("aws_instance.expected.0", names.AttrID, getter(&id1)), + resource.TestCheckResourceAttrWith("aws_instance.expected.1", names.AttrID, getter(&id2)), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameExpected2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected1, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + tfstatecheck.ExpectRegionalARNFormat(resourceNameNotExpected2, tfjsonpath.New(names.AttrARN), "ec2", "instance/{id}"), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_filtered/"), + QueryResultChecks: 
[]querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.StringFunc(checker(&id1)), + }), + + querycheck.ExpectIdentity("aws_instance.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrID: knownvalue.StringFunc(checker(&id2)), + }), + }, + }, + }, + }) +} + +func TestAccEC2Instance_List_ExcludeAutoScaled(t *testing.T) { + t.Skip("Skipping because zero-result queries cause a failure now") + + ctx := acctest.Context(t) + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + CheckDestroy: testAccCheckInstanceDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_exclude_autoscaled/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{}, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Instance/list_exclude_autoscaled/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectLength("aws_instance.excluded", 0), + + querycheck.ExpectLength("aws_instance.included", 1), + }, + }, + }, + }) +} + +// TODO: Temporary until there is more testing support +func getter(s 
*string) resource.CheckResourceAttrWithFunc { + return func(v string) error { + *s = v + return nil + } +} + +// TODO: Temporary until there is more testing support +func checker(s *string) func(string) error { + return func(v string) error { + if v != *s { + return fmt.Errorf("expected %q, got %q", *s, v) + } + return nil + } +} diff --git a/internal/service/ec2/ec2_instance_state_test.go b/internal/service/ec2/ec2_instance_state_test.go index 65836cb9d48f..0193749f7766 100644 --- a/internal/service/ec2/ec2_instance_state_test.go +++ b/internal/service/ec2/ec2_instance_state_test.go @@ -29,7 +29,7 @@ func TestAccEC2InstanceState_basic(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(state, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(state, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -53,7 +53,7 @@ func TestAccEC2InstanceState_state(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(stateStopped, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(stateStopped, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -66,7 +66,7 @@ func TestAccEC2InstanceState_state(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccInstanceStateConfig_basic(stateRunning, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(stateRunning, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, names.AttrInstanceID), @@ -90,7 +90,7 @@ func TestAccEC2InstanceState_disappears_Instance(t *testing.T) { CheckDestroy: 
testAccCheckInstanceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccInstanceStateConfig_basic(state, acctest.CtFalse), + Config: testAccInstanceStateConfig_basic(state, false), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceStateExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfec2.ResourceInstance(), parentResourceName), @@ -128,7 +128,7 @@ func testAccCheckInstanceStateExists(ctx context.Context, n string) resource.Tes } } -func testAccInstanceStateConfig_basic(state string, force string) string { +func testAccInstanceStateConfig_basic(state string, force bool) string { return acctest.ConfigCompose( acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), acctest.AvailableEC2InstanceTypeForRegion("t3.micro", "t2.micro", "t1.micro", "m1.small"), @@ -141,7 +141,7 @@ resource "aws_instance" "test" { resource "aws_ec2_instance_state" "test" { instance_id = aws_instance.test.id state = %[1]q - force = %[2]s + force = %[2]t } `, state, force)) } diff --git a/internal/service/ec2/ec2_instances_data_source.go b/internal/service/ec2/ec2_instances_data_source.go index e0ed43bdfdaa..c6dbcdedb8b1 100644 --- a/internal/service/ec2/ec2_instances_data_source.go +++ b/internal/service/ec2/ec2_instances_data_source.go @@ -94,15 +94,13 @@ func dataSourceInstancesRead(ctx context.Context, d *schema.ResourceData, meta a input.Filters = nil } - output, err := findInstances(ctx, conn, &input) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Instances: %s", err) - } - var instanceIDs, privateIPs, publicIPs, ipv6Addresses []string - for _, v := range output { + for v, err := range listInstances(ctx, conn, &input) { + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Instances: %s", err) + } + instanceIDs = append(instanceIDs, aws.ToString(v.InstanceId)) if privateIP := aws.ToString(v.PrivateIpAddress); privateIP != "" { privateIPs = append(privateIPs, privateIP) diff --git 
a/internal/service/ec2/ec2_spot_instance_request.go b/internal/service/ec2/ec2_spot_instance_request.go index 7d51e47e77e1..b7d9e1875c2a 100644 --- a/internal/service/ec2/ec2_spot_instance_request.go +++ b/internal/service/ec2/ec2_spot_instance_request.go @@ -358,7 +358,7 @@ func readInstance(ctx context.Context, d *schema.ResourceData, meta any) diag.Di "host": *instance.PrivateIpAddress, }) } - if err := readBlockDevices(ctx, d, meta, instance, false); err != nil { + if err := readBlockDevices(ctx, d, meta.(*conns.AWSClient), instance, false); err != nil { return sdkdiag.AppendFromErr(diags, err) } diff --git a/internal/service/ec2/ec2_stop_instance_action.go b/internal/service/ec2/ec2_stop_instance_action.go new file mode 100644 index 000000000000..a9e9a84146a8 --- /dev/null +++ b/internal/service/ec2/ec2_stop_instance_action.go @@ -0,0 +1,269 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + "fmt" + "slices" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_ec2_stop_instance, name="Stop Instance") +func newStopInstanceAction(_ context.Context) (action.ActionWithConfigure, error) { + return &stopInstanceAction{}, nil +} 
+ +var ( + _ action.Action = (*stopInstanceAction)(nil) +) + +type stopInstanceAction struct { + framework.ActionWithModel[stopInstanceModel] +} + +type stopInstanceModel struct { + framework.WithRegionModel + InstanceID types.String `tfsdk:"instance_id"` + Force types.Bool `tfsdk:"force"` + Timeout types.Int64 `tfsdk:"timeout"` +} + +func (a *stopInstanceAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Stops an EC2 instance. This action will gracefully stop the instance and wait for it to reach the stopped state.", + Attributes: map[string]schema.Attribute{ + names.AttrInstanceID: schema.StringAttribute{ + Description: "The ID of the EC2 instance to stop", + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches( + regexache.MustCompile(`^i-[0-9a-f]{8,17}$`), + "must be a valid EC2 instance ID (e.g., i-1234567890abcdef0)", + ), + }, + }, + "force": schema.BoolAttribute{ + Description: "Forces the instance to stop. The instance does not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances.", + Optional: true, + }, + names.AttrTimeout: schema.Int64Attribute{ + Description: "Timeout in seconds to wait for the instance to stop (default: 600)", + Optional: true, + Validators: []validator.Int64{ + int64validator.AtLeast(30), + int64validator.AtMost(3600), + }, + }, + }, + } +} + +func (a *stopInstanceAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config stopInstanceModel + + // Parse configuration + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Get AWS client + conn := a.Meta().EC2Client(ctx) + + instanceID := config.InstanceID.ValueString() + force := config.Force.ValueBool() + + // Set default timeout if not provided + timeout := 600 * time.Second + if !config.Timeout.IsNull() { + timeout = time.Duration(config.Timeout.ValueInt64()) * time.Second + } + + tflog.Info(ctx, "Starting EC2 stop instance action", map[string]any{ + names.AttrInstanceID: instanceID, + "force": force, + names.AttrTimeout: timeout.String(), + }) + + // Send initial progress update + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Starting stop operation for EC2 instance %s...", instanceID), + }) + + // Check current instance state first + instance, err := findInstanceByID(ctx, conn, instanceID) + if err != nil { + if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { + resp.Diagnostics.AddError( + "Instance Not Found", + fmt.Sprintf("EC2 instance %s was not found", instanceID), + ) + return + } + resp.Diagnostics.AddError( + "Failed to Describe Instance", + fmt.Sprintf("Could not describe EC2 instance %s: %s", instanceID, err), + ) + return + } + + currentState := string(instance.State.Name) + tflog.Debug(ctx, "Current instance state", map[string]any{ + names.AttrInstanceID: instanceID, + names.AttrState: currentState, + }) + + // Check if instance is already stopped + if instance.State.Name == awstypes.InstanceStateNameStopped { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s is already stopped", instanceID), + }) + tflog.Info(ctx, "Instance already stopped", map[string]any{ + names.AttrInstanceID: instanceID, + }) + return + } + + // Check if instance is in a state that can be stopped + if !canStopInstance(instance.State.Name) { + resp.Diagnostics.AddError( + "Cannot Stop Instance", + fmt.Sprintf("EC2 instance %s is in state '%s' and cannot be stopped. 
Instance must be in 'running' or 'stopping' state.", instanceID, currentState), + ) + return + } + + // If instance is already stopping, just wait for it + if instance.State.Name == awstypes.InstanceStateNameStopping { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s is already stopping, waiting for completion...", instanceID), + }) + } else { + // Stop the instance + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Sending stop command to EC2 instance %s...", instanceID), + }) + + input := ec2.StopInstancesInput{ + Force: aws.Bool(force), + InstanceIds: []string{instanceID}, + } + + _, err = conn.StopInstances(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Stop Instance", + fmt.Sprintf("Could not stop EC2 instance %s: %s", instanceID, err), + ) + return + } + + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Stop command sent to EC2 instance %s, waiting for instance to stop...", instanceID), + }) + } + + // Wait for instance to stop with periodic progress updates + err = a.waitForInstanceStopped(ctx, conn, instanceID, timeout, resp) + if err != nil { + resp.Diagnostics.AddError( + "Timeout Waiting for Instance to Stop", + fmt.Sprintf("EC2 instance %s did not stop within %s: %s", instanceID, timeout, err), + ) + return + } + + // Final success message + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s has been successfully stopped", instanceID), + }) + + tflog.Info(ctx, "EC2 stop instance action completed successfully", map[string]any{ + names.AttrInstanceID: instanceID, + }) +} + +// canStopInstance checks if an instance can be stopped based on its current state +func canStopInstance(state awstypes.InstanceStateName) bool { + switch state { + case awstypes.InstanceStateNameRunning, awstypes.InstanceStateNameStopping: + return true + default: + return false + } +} + +// waitForInstanceStopped waits for an instance to reach 
the stopped state with progress updates +func (a *stopInstanceAction) waitForInstanceStopped(ctx context.Context, conn *ec2.Client, instanceID string, timeout time.Duration, resp *action.InvokeResponse) error { + const ( + pollInterval = 10 * time.Second + progressInterval = 30 * time.Second + ) + + deadline := time.Now().Add(timeout) + lastProgressUpdate := time.Now() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Check if we've exceeded the timeout + if time.Now().After(deadline) { + return fmt.Errorf("timeout after %s", timeout) + } + + // Get current instance state + instance, err := findInstanceByID(ctx, conn, instanceID) + if err != nil { + return fmt.Errorf("describing instance: %w", err) + } + + currentState := string(instance.State.Name) + + // Send progress update every 30 seconds + if time.Since(lastProgressUpdate) >= progressInterval { + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("EC2 instance %s is currently in state '%s', continuing to wait for 'stopped'...", instanceID, currentState), + }) + lastProgressUpdate = time.Now() + } + + // Check if we've reached the target state + if instance.State.Name == awstypes.InstanceStateNameStopped { + return nil + } + + // Check if we're in an unexpected state + validStates := []awstypes.InstanceStateName{ + awstypes.InstanceStateNameRunning, + awstypes.InstanceStateNameStopping, + awstypes.InstanceStateNameShuttingDown, + } + if !slices.Contains(validStates, instance.State.Name) { + return fmt.Errorf("instance entered unexpected state: %s", currentState) + } + + // Wait before next poll + time.Sleep(pollInterval) + } +} diff --git a/internal/service/ec2/ec2_stop_instance_action_test.go b/internal/service/ec2/ec2_stop_instance_action_test.go new file mode 100644 index 000000000000..b1d345f7d1be --- /dev/null +++ b/internal/service/ec2/ec2_stop_instance_action_test.go @@ -0,0 +1,338 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "context" + "fmt" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccEC2StopInstanceAction_basic(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Instance + resourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.EC2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + Steps: []resource.TestStep{ + { + Config: testAccStopInstanceActionConfig_force(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExistsLocal(ctx, resourceName, &v), + testAccCheckInstanceState(ctx, resourceName, awstypes.InstanceStateNameRunning), + ), + }, + { + PreConfig: func() { + if v.InstanceId == nil { + t.Fatal("Instance ID is nil") + } + + if err := invokeStopInstanceAction(ctx, t, *v.InstanceId, true); err != nil { + t.Fatalf("Failed to invoke stop instance action: %v", err) + } + }, + Config: testAccStopInstanceActionConfig_force(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceState(ctx, resourceName, 
awstypes.InstanceStateNameStopped), + ), + }, + }, + }) +} + +func TestAccEC2StopInstanceAction_trigger(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Instance + resourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.EC2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + Steps: []resource.TestStep{ + { + Config: testAccStopInstanceActionConfig_trigger(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExistsLocal(ctx, resourceName, &v), + testAccCheckInstanceState(ctx, resourceName, awstypes.InstanceStateNameStopped), + ), + }, + }, + }) +} + +func testAccCheckInstanceExistsLocal(ctx context.Context, n string, v *awstypes.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No EC2 Instance ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + instance, err := tfec2.FindInstanceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return err + } + + *v = *instance + + return nil + } +} + +func testAccCheckInstanceState(ctx context.Context, n string, expectedState awstypes.InstanceStateName) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No EC2 Instance ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) + + instance, err := tfec2.FindInstanceByID(ctx, conn, 
rs.Primary.ID) + if err != nil { + return err + } + + if instance.State.Name != expectedState { + return fmt.Errorf("Expected instance state %s, got %s", expectedState, instance.State.Name) + } + + return nil + } +} + +func testAccStopInstanceActionConfig_force(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.ConfigAvailableAZsNoOptIn(), + acctest.AvailableEC2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } +} + +action "aws_ec2_stop_instance" "test" { + config { + instance_id = aws_instance.test.id + force = true + } +} +`, rName)) +} + +func testAccStopInstanceActionConfig_trigger(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), + acctest.ConfigAvailableAZsNoOptIn(), + acctest.AvailableEC2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } +} + +action "aws_ec2_stop_instance" "test" { + config { + instance_id = aws_instance.test.id + force = true + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ec2_stop_instance.test] + } + } +} +`, rName)) +} + +// Step 1: Get the AWS provider as a ProviderServerWithActions +func providerWithActions(ctx context.Context, t *testing.T) tfprotov5.ProviderServerWithActions { //nolint:staticcheck // SA1019: Working in alpha situation + t.Helper() + + 
factories := acctest.ProtoV5ProviderFactories + providerFactory, exists := factories["aws"] + if !exists { + t.Fatal("AWS provider factory not found in ProtoV5ProviderFactories") + } + + providerServer, err := providerFactory() + if err != nil { + t.Fatalf("Failed to create provider server: %v", err) + } + + providerWithActions, ok := providerServer.(tfprotov5.ProviderServerWithActions) //nolint:staticcheck // SA1019: Working in alpha situation + if !ok { + t.Fatal("Provider does not implement ProviderServerWithActions") + } + + schemaResp, err := providerWithActions.GetProviderSchema(ctx, &tfprotov5.GetProviderSchemaRequest{}) + if err != nil { + t.Fatalf("Failed to get provider schema: %v", err) + } + + if len(schemaResp.ActionSchemas) == 0 { + t.Fatal("Expected to find action schemas but didn't find any!") + } + + providerConfigValue, err := buildProviderConfiguration(t, schemaResp.Provider) + if err != nil { + t.Fatalf("Failed to build provider configuration: %v", err) + } + + configureResp, err := providerWithActions.ConfigureProvider(ctx, &tfprotov5.ConfigureProviderRequest{ + TerraformVersion: "1.0.0", + Config: providerConfigValue, + }) + if err != nil { + t.Fatalf("Failed to configure provider: %v", err) + } + + if len(configureResp.Diagnostics) > 0 { + var diagMessages []string + for _, diag := range configureResp.Diagnostics { + diagMessages = append(diagMessages, fmt.Sprintf("Severity: %s, Summary: %s, Detail: %s", diag.Severity, diag.Summary, diag.Detail)) + } + t.Fatalf("Provider configuration failed: %v", diagMessages) + } + + return providerWithActions +} + +// buildProviderConfiguration creates a minimal provider configuration from the schema +func buildProviderConfiguration(t *testing.T, providerSchema *tfprotov5.Schema) (*tfprotov5.DynamicValue, error) { + t.Helper() + + providerType := providerSchema.Block.ValueType() + configMap := make(map[string]tftypes.Value) + + if objType, ok := providerType.(tftypes.Object); ok { + for attrName, attrType 
:= range objType.AttributeTypes { + configMap[attrName] = tftypes.NewValue(attrType, nil) + } + } + + configValue, err := tfprotov5.NewDynamicValue( + providerType, + tftypes.NewValue(providerType, configMap), + ) + if err != nil { + return nil, fmt.Errorf("failed to create config: %w", err) + } + + return &configValue, nil +} + +// Step 2: Build action configuration +func buildStopInstanceActionConfig(instanceID string, force bool) (tftypes.Type, map[string]tftypes.Value) { + configType := tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + names.AttrInstanceID: tftypes.String, + "force": tftypes.Bool, + names.AttrTimeout: tftypes.Number, + names.AttrRegion: tftypes.String, + }, + } + + config := map[string]tftypes.Value{ + names.AttrInstanceID: tftypes.NewValue(tftypes.String, instanceID), + "force": tftypes.NewValue(tftypes.Bool, force), + names.AttrTimeout: tftypes.NewValue(tftypes.Number, nil), + names.AttrRegion: tftypes.NewValue(tftypes.String, nil), + } + + return configType, config +} + +// Step 3: Programmatic action invocation +func invokeStopInstanceAction(ctx context.Context, t *testing.T, instanceID string, force bool) error { + t.Helper() + + p := providerWithActions(ctx, t) + configType, configMap := buildStopInstanceActionConfig(instanceID, force) + actionTypeName := "aws_ec2_stop_instance" + + testConfig, err := tfprotov5.NewDynamicValue( + configType, + tftypes.NewValue(configType, configMap), + ) + if err != nil { + return fmt.Errorf("failed to create config: %w", err) + } + + invokeResp, err := p.InvokeAction(ctx, &tfprotov5.InvokeActionRequest{ + ActionType: actionTypeName, + Config: &testConfig, + }) + if err != nil { + return fmt.Errorf("invoke failed: %w", err) + } + + // Process events and check for completion + for event := range invokeResp.Events { + switch eventType := event.Type.(type) { + case tfprotov5.ProgressInvokeActionEventType: + t.Logf("Progress: %s", eventType.Message) + case tfprotov5.CompletedInvokeActionEventType: 
+ return nil + default: + // Handle any other event types or errors + t.Logf("Received event type: %T", eventType) + } + } + + return nil +} diff --git a/internal/service/ec2/filters.go b/internal/service/ec2/filters.go index e488fbcc91cc..475b8930b0af 100644 --- a/internal/service/ec2/filters.go +++ b/internal/service/ec2/filters.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" datasourceschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -169,6 +170,31 @@ type ( customFilters = fwtypes.SetNestedObjectValueOf[customFilterModel] ) +func customListFiltersBlock(ctx context.Context) listschema.ListNestedBlock { + return listschema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[customListFilterModel](ctx), + NestedObject: listschema.NestedBlockObject{ + Attributes: map[string]listschema.Attribute{ + names.AttrName: listschema.StringAttribute{ + Required: true, + }, + names.AttrValues: listschema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Required: true, + }, + }, + }, + } +} + +type customListFilterModel struct { + Name types.String `tfsdk:"name"` + Values fwtypes.ListOfString `tfsdk:"values"` +} + +type customListFilters = fwtypes.ListNestedObjectValueOf[customListFilterModel] + // newCustomFilterList takes the set value extracted from a schema // attribute conforming to the schema returned by CustomFiltersSchema, // and transforms it into a []*ec2.Filter representing the same filter diff --git a/internal/service/ec2/find.go b/internal/service/ec2/find.go index 8c1b6267bb39..5376ddd63e7e 100644 --- a/internal/service/ec2/find.go +++ b/internal/service/ec2/find.go 
@@ -434,70 +434,6 @@ func findInstanceCreditSpecificationByID(ctx context.Context, conn *ec2.Client, return output, nil } -func findInstances(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) ([]awstypes.Instance, error) { - var output []awstypes.Instance - - pages := ec2.NewDescribeInstancesPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if tfawserr.ErrCodeEquals(err, errCodeInvalidInstanceIDNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: &input, - } - } - - if err != nil { - return nil, err - } - - for _, v := range page.Reservations { - output = append(output, v.Instances...) - } - } - - return output, nil -} - -func findInstance(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstancesInput) (*awstypes.Instance, error) { - output, err := findInstances(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSingleValueResult(output, func(v *awstypes.Instance) bool { return v.State != nil }) -} - -func findInstanceByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.Instance, error) { - input := ec2.DescribeInstancesInput{ - InstanceIds: []string{id}, - } - - output, err := findInstance(ctx, conn, &input) - - if err != nil { - return nil, err - } - - if state := output.State.Name; state == awstypes.InstanceStateNameTerminated { - return nil, &retry.NotFoundError{ - Message: string(state), - LastRequest: &input, - } - } - - // Eventual consistency check. 
- if aws.ToString(output.InstanceId) != id { - return nil, &retry.NotFoundError{ - LastRequest: &input, - } - } - - return output, nil -} - func findInstanceStatus(ctx context.Context, conn *ec2.Client, input *ec2.DescribeInstanceStatusInput) (*awstypes.InstanceStatus, error) { output, err := findInstanceStatuses(ctx, conn, input) diff --git a/internal/service/ec2/service_package_gen.go b/internal/service/ec2/service_package_gen.go index 525aaad6f819..adede89c4d55 100644 --- a/internal/service/ec2/service_package_gen.go +++ b/internal/service/ec2/service_package_gen.go @@ -4,6 +4,8 @@ package ec2 import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" @@ -17,6 +19,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newStopInstanceAction, + TypeName: "aws_ec2_stop_instance", + Name: "Stop Instance", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{ { @@ -1799,6 +1812,21 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource{ + { + Factory: instanceResourceAsListResource, + TypeName: "aws_instance", + Name: "Instance", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrID), + }, + }) +} + func (p *servicePackage) ServicePackageName() string { return names.EC2 } diff --git a/internal/service/ec2/testdata/Instance/list_basic/main.tf 
b/internal/service/ec2/testdata/Instance/list_basic/main.tf new file mode 100644 index 000000000000..d88aa6324cfe --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_basic/main.tf @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "test" { + count = 3 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} diff --git a/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..ed585f6b87be --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws +} diff --git a/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf new file mode 100644 index 000000000000..f994bc816b2c --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_autoscaling_group" "test" { + name = var.rName + availability_zones = [data.aws_availability_zones.available.names[0]] + + max_size = 1 + min_size = 1 + desired_capacity = 1 + + launch_template { + id = aws_launch_template.test.id + version = aws_launch_template.test.default_version + } + + tag { + key = "test-filter" + value = var.rName + propagate_at_launch = true + } +} + +resource "aws_launch_template" "test" { + name = var.rName + image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + +# acctest.ConfigAvailableAZsNoOptInDefaultExclude() + +data "aws_availability_zones" "available" { + exclude_zone_ids = local.default_exclude_zone_ids + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +locals { + default_exclude_zone_ids = ["usw2-az4", "usgw1-az2"] +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl new file mode 100644 index 000000000000..d3efe088c97f --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_exclude_autoscaled/main.tfquery.hcl @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "excluded" { + provider = aws + + config { + filter { + name = "tag:test-filter" + values = [var.rName] + } + } +} + +list "aws_instance" "included" { + provider = aws + + config { + filter { + name = "tag:test-filter" + values = [var.rName] + } + include_auto_scaled = true + } +} diff --git a/internal/service/ec2/testdata/Instance/list_filtered/main.tf b/internal/service/ec2/testdata/Instance/list_filtered/main.tf new file mode 100644 index 000000000000..b443362903b8 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_filtered/main.tf @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "expected" { + count = 2 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } + + tags = { + Name = "expected-${count.index}" + } +} + +resource "aws_ec2_instance_state" "expected" { + count = 2 + + instance_id = aws_instance.expected[count.index].id + state = "stopped" +} + +resource "aws_instance" "not_expected" { + count = 2 + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } + + tags = { + Name = "not-expected-${count.index}" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} diff --git a/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl new file mode 100644 index 000000000000..f4d65cfbd1de --- /dev/null +++ 
b/internal/service/ec2/testdata/Instance/list_filtered/main.tfquery.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws + + config { + filter { + name = "instance-state-name" + values = ["stopped"] + } + } +} diff --git a/internal/service/ec2/testdata/Instance/list_region_override/main.tf b/internal/service/ec2/testdata/Instance/list_region_override/main.tf new file mode 100644 index 000000000000..05a65a6027a7 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_region_override/main.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_instance" "test" { + count = 3 + + region = var.region + + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-arm64.id + instance_type = "t4g.nano" + + metadata_options { + http_tokens = "required" + } +} + +# acctest.ConfigLatestAmazonLinux2HVMEBSARM64AMI + +# acctest.configLatestAmazonLinux2HVMEBSAMI("arm64") + +data "aws_ami" "amzn2-ami-minimal-hvm-ebs-arm64" { + region = var.region + + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn2-ami-minimal-hvm-*"] + } + + filter { + name = "root-device-type" + values = ["ebs"] + } + + filter { + name = "architecture" + values = ["arm64"] + } +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl b/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl new file mode 100644 index 000000000000..aeff71aebd72 --- /dev/null +++ b/internal/service/ec2/testdata/Instance/list_region_override/main.tfquery.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_instance" "test" { + provider = aws + + config { + region = var.region + } +} diff --git a/internal/service/ecs/service.go b/internal/service/ecs/service.go index 1ea7f6b5b22d..9d3493c27ebc 100644 --- a/internal/service/ecs/service.go +++ b/internal/service/ecs/service.go @@ -5,6 +5,7 @@ package ecs import ( "context" + "encoding/json" "errors" "fmt" "log" @@ -19,6 +20,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ecs" + "github.com/aws/aws-sdk-go-v2/service/ecs/document" awstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -34,6 +36,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" + "github.com/hashicorp/terraform-provider-aws/internal/smithy" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -649,6 +652,12 @@ func resourceService() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, + "hook_details": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + ValidateFunc: verify.ValidStringIsJSONOrYAML, + }, }, }, }, @@ -1799,7 +1808,6 @@ func resourceServiceUpdate(ctx context.Context, d *schema.ResourceData, meta any return false, err }, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "updating ECS Service (%s): %s", d.Id(), err) } @@ -1847,7 +1855,6 @@ func resourceServiceDelete(ctx context.Context, d *schema.ResourceData, meta any } _, err := conn.UpdateService(ctx, input) - if err != nil { return sdkdiag.AppendErrorf(diags, "draining ECS Service (%s): 
%s", d.Id(), err) } @@ -1874,7 +1881,6 @@ func resourceServiceDelete(ctx context.Context, d *schema.ResourceData, meta any return false, err }, ) - if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ECS Service (%s): %s", d.Id(), err) } @@ -1948,7 +1954,6 @@ func retryServiceCreate(ctx context.Context, conn *ecs.Client, input *ecs.Create return false, err }, ) - if err != nil { return nil, err } @@ -1958,7 +1963,6 @@ func retryServiceCreate(ctx context.Context, conn *ecs.Client, input *ecs.Create func findService(ctx context.Context, conn *ecs.Client, input *ecs.DescribeServicesInput) (*awstypes.Service, error) { output, err := findServices(ctx, conn, input) - if err != nil { return nil, err } @@ -2121,7 +2125,6 @@ func statusServiceWaitForStable(ctx context.Context, conn *ecs.Client, serviceNa return func() (any, string, error) { outputRaw, serviceStatus, err := statusService(ctx, conn, serviceName, clusterNameOrARN)() - if err != nil { return nil, "", err } @@ -2597,6 +2600,12 @@ func flattenLifecycleHooks(apiObjects []awstypes.DeploymentLifecycleHook) []any tfMap["lifecycle_stages"] = stages } + if v := apiObject.HookDetails; v != nil { + if jsonString, err := smithy.DocumentToJSONString(v); err == nil { + tfMap["hook_details"] = jsonString + } + } + tfList = append(tfList, tfMap) } @@ -2633,6 +2642,13 @@ func expandLifecycleHooks(tfList []any) []awstypes.DeploymentLifecycleHook { hook.LifecycleStages = stages } + if v, ok := tfMap["hook_details"].(string); ok && v != "" { + var jsonValue any + if err := json.Unmarshal([]byte(v), &jsonValue); err == nil { + hook.HookDetails = document.NewLazyDocument(jsonValue) + } + } + apiObject = append(apiObject, hook) } diff --git a/internal/service/ecs/service_test.go b/internal/service/ecs/service_test.go index cb17c48693fa..f9329f765463 100644 --- a/internal/service/ecs/service_test.go +++ b/internal/service/ecs/service_test.go @@ -1013,6 +1013,14 @@ func TestAccECSService_BlueGreenDeployment_basic(t 
*testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, "deployment_configuration.0.lifecycle_hook.*.role_arn", "aws_iam_role.global", names.AttrARN), resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "POST_SCALE_UP"), resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "POST_TEST_TRAFFIC_SHIFT"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "[1,\"2\",true]", + "lifecycle_stages.0": "POST_SCALE_UP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "3.14", + "lifecycle_stages.0": "TEST_TRAFFIC_SHIFT", + }), // Load balancer advanced configuration checks resource.TestCheckResourceAttr(resourceName, "load_balancer.0.advanced_configuration.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "load_balancer.0.advanced_configuration.0.alternate_target_group_arn"), @@ -1036,6 +1044,10 @@ func TestAccECSService_BlueGreenDeployment_basic(t *testing.T) { // Lifecycle hooks configuration checks resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.lifecycle_hook.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "deployment_configuration.0.lifecycle_hook.*.lifecycle_stages.*", "PRE_SCALE_UP"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "{\"bool_key\":true,\"int_key\":10,\"list_key\":[1,\"2\",true],\"object_key\":{\"bool_key\":true,\"int_key\":10,\"list_key\":[1,\"2\",true],\"string_key\":\"string_val\"},\"string_key\":\"string_val\"}", + "lifecycle_stages.0": "PRE_SCALE_UP", + }), // Service Connect test traffic rules checks resource.TestCheckResourceAttr(resourceName, 
"service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.name", "x-test-header-2"), resource.TestCheckResourceAttr(resourceName, "service_connect_configuration.0.service.0.client_alias.0.test_traffic_rules.0.header.0.value.0.exact", "test-value-2"), @@ -1188,6 +1200,14 @@ func TestAccECSService_BlueGreenDeployment_changeStrategy(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": acctest.CtTrue, + "lifecycle_stages.0": "POST_SCALE_UP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "deployment_configuration.0.lifecycle_hook.*", map[string]string{ + "hook_details": "\"Test string\"", + "lifecycle_stages.0": "TEST_TRAFFIC_SHIFT", + }), ), }, { @@ -1281,7 +1301,7 @@ func TestAccECSService_BlueGreenDeployment_waitServiceActive(t *testing.T) { CheckDestroy: testAccCheckServiceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccServiceConfig_blueGreenDeployment_basic(rName, true), + Config: testAccServiceConfig_blueGreenDeployment_basic(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckServiceExists(ctx, resourceName, &service), resource.TestCheckResourceAttr(resourceName, "deployment_configuration.0.strategy", "BLUE_GREEN"), @@ -2782,7 +2802,6 @@ func testAccCheckServiceExists(ctx context.Context, name string, service *awstyp return nil }) - if err != nil { return err } @@ -3482,12 +3501,14 @@ resource "aws_ecs_service" "test" { hook_target_arn = aws_lambda_function.hook_success.arn role_arn = aws_iam_role.global.arn lifecycle_stages = ["POST_SCALE_UP", "POST_TEST_TRAFFIC_SHIFT"] + hook_details = jsonencode([1, "2", true]) } lifecycle_hook { hook_target_arn = aws_lambda_function.hook_success.arn role_arn = 
aws_iam_role.global.arn lifecycle_stages = ["TEST_TRAFFIC_SHIFT", "POST_PRODUCTION_TRAFFIC_SHIFT"] + hook_details = "3.14" } } @@ -3723,6 +3744,12 @@ resource "aws_ecs_service" "test" { hook_target_arn = %[2]s role_arn = aws_iam_role.global.arn lifecycle_stages = ["PRE_SCALE_UP"] + hook_details = jsonencode({ "bool_key" : true, "string_key" : "string_val", "int_key" : 10, "list_key" : [1, "2", true], "object_key" : { + "bool_key" : true, + "string_key" : "string_val", + "int_key" : 10, + "list_key" : [1, "2", true] + } }) } } @@ -3862,12 +3889,14 @@ resource "aws_ecs_service" "test" { hook_target_arn = aws_lambda_function.hook_success.arn role_arn = aws_iam_role.global.arn lifecycle_stages = ["POST_SCALE_UP", "POST_TEST_TRAFFIC_SHIFT"] + hook_details = "true" } lifecycle_hook { hook_target_arn = aws_lambda_function.hook_success.arn role_arn = aws_iam_role.global.arn lifecycle_stages = ["TEST_TRAFFIC_SHIFT", "POST_PRODUCTION_TRAFFIC_SHIFT"] + hook_details = jsonencode("Test string") } } diff --git a/internal/service/iam/role.go b/internal/service/iam/role.go index 66887151464a..3da98908bc5c 100644 --- a/internal/service/iam/role.go +++ b/internal/service/iam/role.go @@ -7,9 +7,11 @@ import ( "context" "errors" "fmt" + "iter" "log" "net/url" "reflect" + "strings" "time" "github.com/YakDriver/regexache" @@ -17,6 +19,12 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" awspolicy "github.com/hashicorp/awspolicyequivalence" + "github.com/hashicorp/go-cty/cty" + frameworkdiag "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -26,11 +34,15 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" "github.com/hashicorp/terraform-provider-aws/internal/provider/sdkv2/importer" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + itypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -200,6 +212,14 @@ func resourceRole() *schema.Resource { } } +// @SDKListResource("aws_iam_role") +func instanceResourceAsListResource() itypes.ListResourceForSDK { + l := roleListResource{} + l.SetResourceSchema(resourceRole()) + + return &l +} + func resourceRoleCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) @@ -306,32 +326,12 @@ func resourceRoleRead(ctx context.Context, d *schema.ResourceData, meta any) dia return sdkdiag.AppendErrorf(diags, "reading IAM Role (%s): waiting for valid ARN: %s", d.Id(), err) } - d.Set(names.AttrARN, role.Arn) - d.Set("create_date", role.CreateDate.Format(time.RFC3339)) - d.Set(names.AttrDescription, role.Description) - d.Set("max_session_duration", role.MaxSessionDuration) - d.Set(names.AttrName, role.RoleName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(role.RoleName))) - d.Set(names.AttrPath, 
role.Path) - if role.PermissionsBoundary != nil { - d.Set("permissions_boundary", role.PermissionsBoundary.PermissionsBoundaryArn) - } else { - d.Set("permissions_boundary", nil) - } - d.Set("unique_id", role.RoleId) - - assumeRolePolicy, err := url.QueryUnescape(aws.ToString(role.AssumeRolePolicyDocument)) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - - policyToSet, err := verify.PolicyToSet(d.Get("assume_role_policy").(string), assumeRolePolicy) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) + diags = append(diags, resourceRoleFlatten(ctx, role, d)...) + if diags.HasError() { + return diags } - d.Set("assume_role_policy", policyToSet) - + // `inline_policy` is deprecated, so it's not included in resourceRoleFlatten. inlinePolicies, err := readRoleInlinePolicies(ctx, conn, aws.ToString(role.RoleName)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading inline policies for IAM role %s, error: %s", d.Id(), err) @@ -348,14 +348,13 @@ func resourceRoleRead(ctx context.Context, d *schema.ResourceData, meta any) dia } } + // `managed_policy_arns` is deprecated, so it's not included in resourceRoleFlatten. 
policyARNs, err := findRoleAttachedPolicies(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading IAM Policies attached to Role (%s): %s", d.Id(), err) } d.Set("managed_policy_arns", policyARNs) - setTagsOut(ctx, role.Tags) - return diags } @@ -665,6 +664,59 @@ func findRole(ctx context.Context, conn *iam.Client, input *iam.GetRoleInput) (* return output.Role, nil } +func listRoles(ctx context.Context, conn *iam.Client, input *iam.ListRolesInput) iter.Seq2[awstypes.Role, error] { + return func(yield func(awstypes.Role, error) bool) { + pages := iam.NewListRolesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + yield(awstypes.Role{}, err) + return + } + + for _, role := range page.Roles { + if !yield(role, nil) { + return + } + } + } + } +} + +func resourceRoleFlatten(ctx context.Context, role *awstypes.Role, d *schema.ResourceData) diag.Diagnostics { + var diags diag.Diagnostics + + d.Set(names.AttrARN, role.Arn) + d.Set("create_date", role.CreateDate.Format(time.RFC3339)) + d.Set(names.AttrDescription, role.Description) + d.Set("max_session_duration", role.MaxSessionDuration) + d.Set(names.AttrName, role.RoleName) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(role.RoleName))) + d.Set(names.AttrPath, role.Path) + if role.PermissionsBoundary != nil { + d.Set("permissions_boundary", role.PermissionsBoundary.PermissionsBoundaryArn) + } else { + d.Set("permissions_boundary", nil) + } + d.Set("unique_id", role.RoleId) + + assumeRolePolicy, err := url.QueryUnescape(aws.ToString(role.AssumeRolePolicyDocument)) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + policyToSet, err := verify.PolicyToSet(d.Get("assume_role_policy").(string), assumeRolePolicy) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + d.Set("assume_role_policy", policyToSet) + + setTagsOut(ctx, role.Tags) + + return diags +} + func findRoleAttachedPolicies(ctx 
context.Context, conn *iam.Client, roleName string) ([]string, error) { input := &iam.ListAttachedRolePoliciesInput{ RoleName: aws.String(roleName), @@ -999,3 +1051,142 @@ func roleTags(ctx context.Context, conn *iam.Client, identifier string, optFns . return output, nil } + +type roleListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type roleListResourceModel struct { +} + +func (l *roleListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{}, + } +} + +func (l *roleListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.IAMClient(ctx) + + var query roleListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + } + + var input iam.ListRolesInput + if diags := fwflex.Expand(ctx, query, &input); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return + } + + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + + for output, err := range listRoles(ctx, conn, &input) { + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + // Exclude Service-Linked Roles + if strings.HasPrefix(aws.ToString(output.Path), "/aws-service-role/") { + tflog.Debug(ctx, "Skipping resource", map[string]any{ + "skip_reason": "Service-Linked Role", + "role_name": aws.ToString(output.RoleName), + names.AttrPath: aws.ToString(output.Path), + }) + continue + } + + rd := l.ResourceData() + rd.SetId(aws.ToString(output.RoleName)) + 
result.Diagnostics.Append(translateDiags(resourceRoleFlatten(ctx, &output, rd))...) + if result.Diagnostics.HasError() { + yield(result) + return + } + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + result.DisplayName = aws.ToString(output.RoleName) + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return + } + } + } +} + +func translateDiags(in diag.Diagnostics) frameworkdiag.Diagnostics { + out := make(frameworkdiag.Diagnostics, len(in)) + for i, diagIn := range in { + var diagOut frameworkdiag.Diagnostic + if diagIn.Severity == diag.Error { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewErrorDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeErrorDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } else { + if len(diagIn.AttributePath) == 0 { + diagOut = frameworkdiag.NewWarningDiagnostic(diagIn.Summary, diagIn.Detail) + } else { + diagOut = frameworkdiag.NewAttributeWarningDiagnostic(translatePath(diagIn.AttributePath), diagIn.Summary, diagIn.Detail) + } + } + out[i] = diagOut + } + return out +} + +func translatePath(in cty.Path) path.Path { + var out path.Path + + if len(in) == 0 { + return out + } + + step := in[0] + switch v := step.(type) { + case cty.GetAttrStep: + out = path.Root(v.Name) + } + + for i := 1; i < len(in); i++ { + step := in[i] + switch v := step.(type) { + case cty.GetAttrStep: + out = out.AtName(v.Name) + + case cty.IndexStep: + switch v.Key.Type() { + case cty.Number: + v, _ := v.Key.AsBigFloat().Int64() + out = out.AtListIndex(int(v)) + case cty.String: + out = out.AtMapKey(v.Key.AsString()) + } + } + } + + return out +} diff --git a/internal/service/iam/role_list_test.go b/internal/service/iam/role_list_test.go new file mode 
100644 index 000000000000..a7e851de2006 --- /dev/null +++ b/internal/service/iam/role_list_test.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccIAMRole_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_iam_role.test[0]" + resourceName2 := "aws_iam_role.test[1]" + resourceName3 := "aws_iam_role.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IAMServiceID), + CheckDestroy: testAccCheckRoleDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-0")), + 
statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.GlobalARNExact("iam", "role/"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Role/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-0"), + }), + + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-1"), + }), + + querycheck.ExpectIdentity("aws_iam_role.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrName: knownvalue.StringExact(rName + "-2"), + }), + }, + }, + }, + }) +} diff --git a/internal/service/iam/role_test.go b/internal/service/iam/role_test.go index 7360ad2a17c1..4f84101404dc 100644 --- a/internal/service/iam/role_test.go +++ b/internal/service/iam/role_test.go @@ -39,10 +39,11 @@ func TestAccIAMRole_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttrSet(resourceName, "create_date"), + acctest.CheckResourceAttrGlobalARNFormat(ctx, resourceName, names.AttrARN, "iam", "role/{name}"), ), }, { @@ -68,7 +69,7 @@ func TestAccIAMRole_description(t *testing.T) { Steps: []resource.TestStep{ { 
Config: testAccRoleConfig_description(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "This 1s a D3scr!pti0n with weird content: &@90ë\"'{«¡Çø}"), @@ -81,7 +82,7 @@ func TestAccIAMRole_description(t *testing.T) { }, { Config: testAccRoleConfig_updatedDescription(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPath, "/"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "This 1s an Upd@ted D3scr!pti0n with weird content: &90ë\"'{«¡Çø}"), @@ -89,7 +90,7 @@ func TestAccIAMRole_description(t *testing.T) { }, { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttrSet(resourceName, "create_date"), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), @@ -112,7 +113,7 @@ func TestAccIAMRole_nameGenerated(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_nameGenerated(), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), acctest.CheckResourceAttrNameGenerated(resourceName, names.AttrName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, "terraform-"), @@ -140,7 +141,7 @@ func TestAccIAMRole_namePrefix(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_namePrefix(acctest.ResourcePrefix), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), 
acctest.CheckResourceAttrNameFromPrefix(resourceName, names.AttrName, acctest.ResourcePrefix), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, acctest.ResourcePrefix), @@ -169,7 +170,7 @@ func TestAccIAMRole_testNameChange(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_pre(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), }, @@ -181,7 +182,7 @@ func TestAccIAMRole_testNameChange(t *testing.T) { }, { Config: testAccRoleConfig_post(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), }, @@ -205,7 +206,7 @@ func TestAccIAMRole_diffs(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_diffs(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -216,7 +217,7 @@ func TestAccIAMRole_diffs(t *testing.T) { }, { Config: testAccRoleConfig_diffs(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -258,7 +259,7 @@ func TestAccIAMRole_diffsCondition(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_diffsCondition(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), ), ConfigPlanChecks: resource.ConfigPlanChecks{ @@ -315,7 +316,7 @@ func TestAccIAMRole_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), 
acctest.CheckResourceDisappears(ctx, acctest.Provider, tfiam.ResourceRole(), resourceName), ), @@ -339,7 +340,7 @@ func TestAccIAMRole_policiesForceDetach(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_forceDetachPolicies(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), testAccAddRolePolicy(ctx, resourceName), ), @@ -376,7 +377,7 @@ func TestAccIAMRole_maxSessionDuration(t *testing.T) { }, { Config: testAccRoleConfig_maxSessionDuration(rName, 3700), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "max_session_duration", "3700"), ), @@ -388,7 +389,7 @@ func TestAccIAMRole_maxSessionDuration(t *testing.T) { }, { Config: testAccRoleConfig_maxSessionDuration(rName, 3701), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, "max_session_duration", "3701"), ), @@ -421,7 +422,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test creation { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -430,7 +431,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test update { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, 
"permissions_boundary", permissionsBoundary2), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary2), @@ -445,7 +446,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test removal { Config: testAccRoleConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), testAccCheckRolePermissionsBoundary(&role, ""), @@ -454,7 +455,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test addition { Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -475,7 +476,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { }, Config: testAccRoleConfig_permissionsBoundary(rName, permissionsBoundary1), // check the boundary was restored - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", permissionsBoundary1), testAccCheckRolePermissionsBoundary(&role, permissionsBoundary1), @@ -484,7 +485,7 @@ func TestAccIAMRole_permissionsBoundary(t *testing.T) { // Test empty value { Config: testAccRoleConfig_permissionsBoundary(rName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), testAccCheckRolePermissionsBoundary(&role, ""), @@ -511,7 +512,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: 
testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -520,7 +521,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { }, { Config: testAccRoleConfig_policyInlineUpdate(rName, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "2"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -528,7 +529,7 @@ func TestAccIAMRole_InlinePolicy_basic(t *testing.T) { }, { Config: testAccRoleConfig_policyInlineUpdateDown(rName, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -559,7 +560,7 @@ func TestAccIAMRole_InlinePolicy_ignoreOrder(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInlineActionOrder(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -619,7 +620,7 @@ func TestAccIAMRole_InlinePolicy_empty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, @@ -662,7 +663,7 @@ func 
TestAccIAMRole_ManagedPolicy_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyManaged(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), @@ -670,14 +671,14 @@ func TestAccIAMRole_ManagedPolicy_basic(t *testing.T) { }, { Config: testAccRoleConfig_policyManagedUpdate(rName, policyName1, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), ), }, { Config: testAccRoleConfig_policyManagedUpdateDown(rName, policyName1, policyName2, policyName3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -708,7 +709,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandRemovalAddedBack(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyDetachManagedPolicy(ctx, &role, policyName), ), @@ -716,7 +717,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandRemovalAddedBack(t *testing.T) { }, { Config: testAccRoleConfig_policyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -742,7 +743,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandRemovalAddedBack(t 
*testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInline(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName), ), @@ -750,7 +751,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandRemovalAddedBack(t *testing.T) { }, { Config: testAccRoleConfig_policyInline(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), ), @@ -777,7 +778,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemoved(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyExtraManaged(rName, policyName1, policyName2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName2), ), @@ -785,7 +786,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemoved(t *testing.T) { }, { Config: testAccRoleConfig_policyExtraManaged(rName, policyName1, policyName2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), ), @@ -812,7 +813,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemoved(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName2), ), @@ -820,7 +821,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemoved(t *testing.T) { }, { Config: 
testAccRoleConfig_policyInline(rName, policyName1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), resource.TestCheckResourceAttr(resourceName, "inline_policy.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "0"), @@ -848,21 +849,21 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionIgnored(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyNoInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName1), ), }, { Config: testAccRoleConfig_policyNoInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName2), ), }, { Config: testAccRoleConfig_policyNoInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName1), testAccCheckRolePolicyRemoveInlinePolicy(ctx, &role, policyName2), @@ -889,14 +890,14 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionIgnored(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyNoManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName), ), }, { Config: testAccRoleConfig_policyNoManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyDetachManagedPolicy(ctx, &role, policyName), ), @@ -922,7 +923,7 @@ 
func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAddInlinePolicy(ctx, &role, policyName), ), @@ -930,7 +931,7 @@ func TestAccIAMRole_InlinePolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { }, { Config: testAccRoleConfig_policyEmptyInline(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, @@ -955,7 +956,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccRoleConfig_policyEmptyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), testAccCheckRolePolicyAttachManagedPolicy(ctx, &role, policyName), ), @@ -963,7 +964,7 @@ func TestAccIAMRole_ManagedPolicy_outOfBandAdditionRemovedEmpty(t *testing.T) { }, { Config: testAccRoleConfig_policyEmptyManaged(rName, policyName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRoleExists(ctx, resourceName, &role), ), }, diff --git a/internal/service/iam/service_package_gen.go b/internal/service/iam/service_package_gen.go index 1619058c64fd..85301f905015 100644 --- a/internal/service/iam/service_package_gen.go +++ b/internal/service/iam/service_package_gen.go @@ -4,6 +4,8 @@ package iam import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" @@ -429,6 +431,22 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return 
slices.Values([]*inttypes.ServicePackageSDKListResource{ + { + Factory: instanceResourceAsListResource, + TypeName: "aws_iam_role", + Name: "Role", + Region: unique.Make(inttypes.ResourceRegionDisabled()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrName, + ResourceType: "Role", + }), + Identity: inttypes.GlobalSingleParameterIdentity(names.AttrName), + }, + }) +} + func (p *servicePackage) ServicePackageName() string { return names.IAM } diff --git a/internal/service/iam/testdata/Role/list_basic/main.tfquery.hcl b/internal/service/iam/testdata/Role/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..a50c741ab0ce --- /dev/null +++ b/internal/service/iam/testdata/Role/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_iam_role" "test" { + provider = aws +} diff --git a/internal/service/iam/testdata/Role/list_basic/main_gen.tf b/internal/service/iam/testdata/Role/list_basic/main_gen.tf new file mode 100644 index 000000000000..94d256f271a1 --- /dev/null +++ b/internal/service/iam/testdata/Role/list_basic/main_gen.tf @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_iam_role" "test" { + count = 3 + + name = "${var.rName}-${count.index}" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole", + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}", + } + Effect = "Allow" + Sid = "" + }] + }) +} + +data "aws_partition" "current" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/lambda/invoke_action.go b/internal/service/lambda/invoke_action.go new file mode 100644 index 000000000000..0e0fc0ed2ff2 --- /dev/null +++ b/internal/service/lambda/invoke_action.go @@ -0,0 +1,216 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package lambda + +import ( + "context" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/lambda" + awstypes "github.com/aws/aws-sdk-go-v2/service/lambda/types" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_lambda_invoke, name="Invoke") +func newInvokeAction(_ context.Context) (action.ActionWithConfigure, error) { + return &invokeAction{}, nil +} + +var ( + _ action.Action = (*invokeAction)(nil) +) + +type invokeAction struct { + framework.ActionWithModel[invokeActionModel] +} + +type invokeActionModel struct { + framework.WithRegionModel + FunctionName types.String `tfsdk:"function_name"` + Payload types.String `tfsdk:"payload"` + Qualifier types.String `tfsdk:"qualifier"` + InvocationType fwtypes.StringEnum[awstypes.InvocationType] `tfsdk:"invocation_type"` + LogType fwtypes.StringEnum[awstypes.LogType] `tfsdk:"log_type"` + ClientContext types.String `tfsdk:"client_context"` +} + +func (a *invokeAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Invokes an AWS Lambda function with the specified payload. 
This action allows for imperative invocation of Lambda functions with full control over invocation parameters.", + Attributes: map[string]schema.Attribute{ + "function_name": schema.StringAttribute{ + Description: "The name, ARN, or partial ARN of the Lambda function to invoke. You can specify a function name (e.g., my-function), a qualified function name (e.g., my-function:PROD), or a partial ARN (e.g., 123456789012:function:my-function).", + Required: true, + }, + "payload": schema.StringAttribute{ + Description: "The JSON payload to send to the Lambda function. This should be a valid JSON string that represents the event data for your function.", + Required: true, + Validators: []validator.String{ + validators.JSON(), + }, + }, + "qualifier": schema.StringAttribute{ + Description: "The version or alias of the Lambda function to invoke. If not specified, the $LATEST version will be invoked.", + Optional: true, + }, + "invocation_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.InvocationType](), + Description: "The invocation type. Valid values are 'RequestResponse' (synchronous), 'Event' (asynchronous), and 'DryRun' (validate parameters without invoking). Defaults to 'RequestResponse'.", + Optional: true, + }, + "log_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LogType](), + Description: "Set to 'Tail' to include the execution log in the response. Only applies to synchronous invocations ('RequestResponse' invocation type). Defaults to 'None'.", + Optional: true, + }, + "client_context": schema.StringAttribute{ + Description: "Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. 
This is only used for mobile applications.", + Optional: true, + }, + }, + } +} + +func (a *invokeAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config invokeActionModel + + // Parse configuration + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) + if resp.Diagnostics.HasError() { + return + } + + // Get AWS client + conn := a.Meta().LambdaClient(ctx) + + functionName := config.FunctionName.ValueString() + payload := config.Payload.ValueString() + + // Set default values for optional parameters + invocationType := awstypes.InvocationTypeRequestResponse + if !config.InvocationType.IsNull() && !config.InvocationType.IsUnknown() { + invocationType = config.InvocationType.ValueEnum() + } + + logType := awstypes.LogTypeNone + if !config.LogType.IsNull() && !config.LogType.IsUnknown() { + logType = config.LogType.ValueEnum() + } + + tflog.Info(ctx, "Starting Lambda function invocation action", map[string]any{ + "function_name": functionName, + "invocation_type": string(invocationType), + "log_type": string(logType), + "payload_length": len(payload), + "has_qualifier": !config.Qualifier.IsNull(), + "has_client_context": !config.ClientContext.IsNull(), + }) + + // Send initial progress update + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Invoking Lambda function %s...", functionName), + }) + + // Build the invoke input + input := &lambda.InvokeInput{ + FunctionName: aws.String(functionName), + Payload: []byte(payload), + InvocationType: invocationType, + LogType: logType, + } + + // Set optional parameters + if !config.Qualifier.IsNull() { + input.Qualifier = config.Qualifier.ValueStringPointer() + } + + if !config.ClientContext.IsNull() { + clientContext := config.ClientContext.ValueString() + // Validate that client context is base64 encoded + if _, err := base64.StdEncoding.DecodeString(clientContext); err != nil { + resp.Diagnostics.AddError( + "Invalid Client Context", + 
fmt.Sprintf("Client context must be base64 encoded: %s", err), + ) + return + } + input.ClientContext = aws.String(clientContext) + } + + // Perform the invocation + output, err := conn.Invoke(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Invoke Lambda Function", + fmt.Sprintf("Could not invoke Lambda function %s: %s", functionName, err), + ) + return + } + + // Handle function errors + if output.FunctionError != nil { + functionError := aws.ToString(output.FunctionError) + payloadStr := string(output.Payload) + + resp.Diagnostics.AddError( + "Lambda Function Execution Error", + fmt.Sprintf("Lambda function %s returned an error (%s): %s", functionName, functionError, payloadStr), + ) + return + } + + // Handle different invocation types + switch invocationType { + case awstypes.InvocationTypeRequestResponse: + // For synchronous invocations, we get an immediate response + statusCode := output.StatusCode + payloadLength := len(output.Payload) + + var message string + if logType == awstypes.LogTypeTail && output.LogResult != nil { + message = fmt.Sprintf("Lambda function %s invoked successfully (status: %d, payload: %d bytes, logs included)", + functionName, statusCode, payloadLength) + } else { + message = fmt.Sprintf("Lambda function %s invoked successfully (status: %d, payload: %d bytes)", + functionName, statusCode, payloadLength) + } + + resp.SendProgress(action.InvokeProgressEvent{ + Message: message, + }) + + case awstypes.InvocationTypeEvent: + // For asynchronous invocations, we only get confirmation that the request was accepted + statusCode := output.StatusCode + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Lambda function %s invoked asynchronously (status: %d)", functionName, statusCode), + }) + + case awstypes.InvocationTypeDryRun: + // For dry run, we validate parameters without actually invoking + statusCode := output.StatusCode + resp.SendProgress(action.InvokeProgressEvent{ + Message: 
fmt.Sprintf("Lambda function %s dry run completed successfully (status: %d)", functionName, statusCode), + }) + } + + tflog.Info(ctx, "Lambda function invocation action completed successfully", map[string]any{ + "function_name": functionName, + "invocation_type": string(invocationType), + names.AttrStatusCode: output.StatusCode, + "executed_version": aws.ToString(output.ExecutedVersion), + "has_logs": output.LogResult != nil, + "payload_length": len(output.Payload), + }) +} diff --git a/internal/service/lambda/invoke_action_test.go b/internal/service/lambda/invoke_action_test.go new file mode 100644 index 000000000000..ffedccd8a10e --- /dev/null +++ b/internal/service/lambda/invoke_action_test.go @@ -0,0 +1,561 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package lambda_test + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/lambda" + awstypes "github.com/aws/aws-sdk-go-v2/service/lambda/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccLambdaInvokeAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "value3" + inputJSON := `{"key1":"value1","key2":"value2"}` + expectedResult := fmt.Sprintf(`{"key1":"value1","key2":"value2","key3":%q}`, testData) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_basic(rName, testData, inputJSON), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeAction(ctx, rName, inputJSON, expectedResult), + ), + }, + }, + }) +} + +func TestAccLambdaInvokeAction_withQualifier(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "qualifier_test" + inputJSON := `{"key1":"value1","key2":"value2"}` + expectedResult := fmt.Sprintf(`{"key1":"value1","key2":"value2","key3":%q}`, testData) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_withQualifier(rName, testData, inputJSON), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionWithQualifier(ctx, rName, inputJSON, expectedResult), + ), + }, + }, + }) +} + +func TestAccLambdaInvokeAction_invocationTypes(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "invocation_types_test" + inputJSON := `{"key1":"value1","key2":"value2"}` + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_invocationType(rName, testData, inputJSON, "RequestResponse"), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionInvocationType(ctx, rName, inputJSON, awstypes.InvocationTypeRequestResponse), + ), + }, + { + Config: testAccInvokeActionConfig_invocationType(rName, testData, inputJSON, "Event"), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionInvocationType(ctx, rName, inputJSON, awstypes.InvocationTypeEvent), + ), + }, + { + Config: testAccInvokeActionConfig_invocationType(rName, testData, inputJSON, "DryRun"), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionInvocationType(ctx, rName, inputJSON, awstypes.InvocationTypeDryRun), + ), + }, + }, + }) +} + +func TestAccLambdaInvokeAction_logTypes(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "log_types_test" + inputJSON := `{"key1":"value1","key2":"value2"}` + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_logType(rName, testData, inputJSON, "None"), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionLogType(ctx, rName, inputJSON, awstypes.LogTypeNone), + ), + }, + { + Config: testAccInvokeActionConfig_logType(rName, testData, inputJSON, "Tail"), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckInvokeActionLogType(ctx, rName, inputJSON, awstypes.LogTypeTail), + ), + }, + }, + }) +} + +func TestAccLambdaInvokeAction_clientContext(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "client_context_test" + inputJSON := `{"key1":"value1","key2":"value2"}` + clientContext := base64.StdEncoding.EncodeToString([]byte(`{"client":{"client_id":"test_client","app_version":"1.0.0"},"env":{"locale":"en_US"}}`)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_clientContext(rName, testData, inputJSON, clientContext), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeActionClientContext(ctx, rName, inputJSON, clientContext), + ), + }, + }, + }) +} + +func TestAccLambdaInvokeAction_complexPayload(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + testData := "complex_test" + inputJSON := `{"key1":{"subkey1":"subvalue1"},"key2":{"subkey2":"subvalue2","subkey3":{"a":"b"}}}` + expectedResult := fmt.Sprintf(`{"key1":{"subkey1":"subvalue1"},"key2":{"subkey2":"subvalue2","subkey3":{"a":"b"}},"key3":%q}`, testData) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.LambdaEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: 
[]tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccInvokeActionConfig_basic(rName, testData, inputJSON), + Check: resource.ComposeTestCheckFunc( + testAccCheckInvokeAction(ctx, rName, inputJSON, expectedResult), + ), + }, + }, + }) +} + +// Test helper functions + +// testAccCheckInvokeAction verifies that the action can successfully invoke a Lambda function +func testAccCheckInvokeAction(ctx context.Context, functionName, inputJSON, expectedResult string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) + + // Invoke the function directly to verify it's working and compare results + input := &lambda.InvokeInput{ + FunctionName: &functionName, + InvocationType: awstypes.InvocationTypeRequestResponse, + Payload: []byte(inputJSON), + } + + output, err := conn.Invoke(ctx, input) + if err != nil { + return fmt.Errorf("Failed to invoke Lambda function %s: %w", functionName, err) + } + + if output.FunctionError != nil { + return fmt.Errorf("Lambda function %s returned an error: %s", functionName, string(output.Payload)) + } + + actualResult := string(output.Payload) + if actualResult != expectedResult { + return fmt.Errorf("Lambda function %s result mismatch. 
Expected: %s, Got: %s", functionName, expectedResult, actualResult) + } + + return nil + } +} + +// testAccCheckInvokeActionWithQualifier verifies action works with function qualifiers +func testAccCheckInvokeActionWithQualifier(ctx context.Context, functionName, inputJSON, expectedResult string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) + + // Get the function to retrieve the version + getFunc, err := conn.GetFunction(ctx, &lambda.GetFunctionInput{ + FunctionName: &functionName, + }) + if err != nil { + return fmt.Errorf("Failed to get Lambda function %s: %w", functionName, err) + } + + // Invoke with the specific version + input := &lambda.InvokeInput{ + FunctionName: &functionName, + InvocationType: awstypes.InvocationTypeRequestResponse, + Payload: []byte(inputJSON), + Qualifier: getFunc.Configuration.Version, + } + + output, err := conn.Invoke(ctx, input) + if err != nil { + return fmt.Errorf("Failed to invoke Lambda function %s with qualifier: %w", functionName, err) + } + + if output.FunctionError != nil { + return fmt.Errorf("Lambda function %s returned an error: %s", functionName, string(output.Payload)) + } + + actualResult := string(output.Payload) + if actualResult != expectedResult { + return fmt.Errorf("Lambda function %s result mismatch with qualifier. 
Expected: %s, Got: %s", functionName, expectedResult, actualResult) + } + + return nil + } +} + +// testAccCheckInvokeActionInvocationType verifies different invocation types work +func testAccCheckInvokeActionInvocationType(ctx context.Context, functionName, inputJSON string, invocationType awstypes.InvocationType) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) + + input := &lambda.InvokeInput{ + FunctionName: &functionName, + InvocationType: invocationType, + Payload: []byte(inputJSON), + } + + output, err := conn.Invoke(ctx, input) + if err != nil { + return fmt.Errorf("Failed to invoke Lambda function %s with invocation type %s: %w", functionName, string(invocationType), err) + } + + // For async invocations, we just verify the request was accepted + if invocationType == awstypes.InvocationTypeEvent { + if output.StatusCode != http.StatusAccepted { + return fmt.Errorf("Expected status code 202 for async invocation, got %d", output.StatusCode) + } + } + + // For dry run, we verify the function would execute successfully + if invocationType == awstypes.InvocationTypeDryRun { + if output.StatusCode != http.StatusNoContent { + return fmt.Errorf("Expected status code 204 for dry run, got %d", output.StatusCode) + } + } + + return nil + } +} + +// testAccCheckInvokeActionLogType verifies log type configuration works +func testAccCheckInvokeActionLogType(ctx context.Context, functionName, inputJSON string, logType awstypes.LogType) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) + + input := &lambda.InvokeInput{ + FunctionName: &functionName, + InvocationType: awstypes.InvocationTypeRequestResponse, + Payload: []byte(inputJSON), + LogType: logType, + } + + output, err := conn.Invoke(ctx, input) + if err != nil { + return fmt.Errorf("Failed to invoke Lambda function %s with log type %s: %w", 
functionName, string(logType), err) + } + + if output.FunctionError != nil { + return fmt.Errorf("Lambda function %s returned an error: %s", functionName, string(output.Payload)) + } + + // If log type is Tail, we should have log results + if logType == awstypes.LogTypeTail { + if output.LogResult == nil { + return fmt.Errorf("Expected log result when log type is Tail, but got none") + } + } + + return nil + } +} + +// testAccCheckInvokeActionClientContext verifies client context is passed correctly +func testAccCheckInvokeActionClientContext(ctx context.Context, functionName, inputJSON, clientContext string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) + + input := &lambda.InvokeInput{ + FunctionName: &functionName, + InvocationType: awstypes.InvocationTypeRequestResponse, + Payload: []byte(inputJSON), + ClientContext: &clientContext, + } + + output, err := conn.Invoke(ctx, input) + if err != nil { + return fmt.Errorf("Failed to invoke Lambda function %s with client context: %w", functionName, err) + } + + if output.FunctionError != nil { + return fmt.Errorf("Lambda function %s returned an error: %s", functionName, string(output.Payload)) + } + + return nil + } +} + +// Configuration functions + +func testAccInvokeActionConfig_base(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_iam_policy_document" "test" { + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["lambda.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = data.aws_iam_policy_document.test.json +} + +resource "aws_iam_role_policy_attachment" "test" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + role = aws_iam_role.test.name +} +`, rName) +} + +func 
testAccInvokeActionConfig_function(rName, testData string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_base(rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + depends_on = [aws_iam_role_policy_attachment.test] + + filename = "test-fixtures/lambda_invocation.zip" + function_name = %[1]q + role = aws_iam_role.test.arn + handler = "lambda_invocation.handler" + runtime = "nodejs18.x" + + environment { + variables = { + TEST_DATA = %[2]q + } + } +} +`, rName, testData)) +} + +func testAccInvokeActionConfig_basic(rName, testData, inputJSON string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_function(rName, testData), + fmt.Sprintf(` +action "aws_lambda_invoke" "test" { + config { + function_name = aws_lambda_function.test.function_name + payload = %[1]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.test] + } + } +} +`, inputJSON)) +} + +func testAccInvokeActionConfig_withQualifier(rName, testData, inputJSON string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_base(rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + depends_on = [aws_iam_role_policy_attachment.test] + + filename = "test-fixtures/lambda_invocation.zip" + function_name = %[1]q + role = aws_iam_role.test.arn + handler = "lambda_invocation.handler" + runtime = "nodejs18.x" + publish = true + + environment { + variables = { + TEST_DATA = %[2]q + } + } +} + +action "aws_lambda_invoke" "test" { + config { + function_name = aws_lambda_function.test.function_name + payload = %[3]q + qualifier = aws_lambda_function.test.version + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.test] + } + } +} +`, rName, testData, inputJSON)) +} + +func 
testAccInvokeActionConfig_invocationType(rName, testData, inputJSON, invocationType string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_function(rName, testData), + fmt.Sprintf(` +action "aws_lambda_invoke" "test" { + config { + function_name = aws_lambda_function.test.function_name + payload = %[1]q + invocation_type = %[2]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.test] + } + } +} +`, inputJSON, invocationType)) +} + +func testAccInvokeActionConfig_logType(rName, testData, inputJSON, logType string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_function(rName, testData), + fmt.Sprintf(` +action "aws_lambda_invoke" "test" { + config { + function_name = aws_lambda_function.test.function_name + payload = %[1]q + log_type = %[2]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.test] + } + } +} +`, inputJSON, logType)) +} + +func testAccInvokeActionConfig_clientContext(rName, testData, inputJSON, clientContext string) string { + return acctest.ConfigCompose( + testAccInvokeActionConfig_function(rName, testData), + fmt.Sprintf(` +action "aws_lambda_invoke" "test" { + config { + function_name = aws_lambda_function.test.function_name + payload = %[1]q + client_context = %[2]q + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.test] + } + } +} +`, inputJSON, clientContext)) +} diff --git a/internal/service/lambda/service_package_gen.go b/internal/service/lambda/service_package_gen.go index cc19d311790d..291f5f67a8fd 100644 --- a/internal/service/lambda/service_package_gen.go +++ b/internal/service/lambda/service_package_gen.go @@ 
-17,6 +17,16 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newInvokeAction, + TypeName: "aws_lambda_invoke", + Name: "Invoke", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} func (p *servicePackage) EphemeralResources(ctx context.Context) []*inttypes.ServicePackageEphemeralResource { return []*inttypes.ServicePackageEphemeralResource{ { diff --git a/internal/service/logs/group.go b/internal/service/logs/group.go index 640658404788..d81def67d4a8 100644 --- a/internal/service/logs/group.go +++ b/internal/service/logs/group.go @@ -5,12 +5,16 @@ package logs import ( "context" + "fmt" + "iter" "log" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" awstypes "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/hashicorp/terraform-plugin-framework/list" + listschema "github.com/hashicorp/terraform-plugin-framework/list/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -18,20 +22,23 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/retry" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + inttypes "github.com/hashicorp/terraform-provider-aws/internal/types" 
"github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_cloudwatch_log_group", name="Log Group") // @Tags(identifierAttribute="arn") +// @IdentityAttribute("name") // @Testing(destroyTakesT=true) // @Testing(existsTakesT=true) // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types;awstypes;awstypes.LogGroup") -// @IdentityAttribute("name") // @Testing(idAttrDuplicates="name") // @Testing(preIdentityVersion="v6.7.0") func resourceGroup() *schema.Resource { @@ -101,6 +108,14 @@ func resourceGroup() *schema.Resource { } } +// @SDKListResource("aws_cloudwatch_log_group") +func logGroupResourceAsListResource() inttypes.ListResourceForSDK { + l := logGroupListResource{} + l.SetResourceSchema(resourceGroup()) + + return &l +} + func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).LogsClient(ctx) @@ -158,14 +173,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta any) di return sdkdiag.AppendErrorf(diags, "reading CloudWatch Logs Log Group (%s): %s", d.Id(), err) } - d.Set(names.AttrARN, trimLogGroupARNWildcardSuffix(aws.ToString(lg.Arn))) - d.Set(names.AttrKMSKeyID, lg.KmsKeyId) - d.Set("log_group_class", lg.LogGroupClass) - d.Set(names.AttrName, lg.LogGroupName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(lg.LogGroupName))) - d.Set("retention_in_days", lg.RetentionInDays) - // Support in-place update of non-refreshable attribute. 
- d.Set(names.AttrSkipDestroy, d.Get(names.AttrSkipDestroy)) + resourceGroupFlatten(ctx, d, *lg) return diags } @@ -264,40 +272,123 @@ func findLogGroupByName(ctx context.Context, conn *cloudwatchlogs.Client, name s return findLogGroup(ctx, conn, &input, func(v *awstypes.LogGroup) bool { return aws.ToString(v.LogGroupName) == name - }) + }, tfslices.WithReturnFirstMatch) } -func findLogGroup(ctx context.Context, conn *cloudwatchlogs.Client, input *cloudwatchlogs.DescribeLogGroupsInput, filter tfslices.Predicate[*awstypes.LogGroup]) (*awstypes.LogGroup, error) { - output, err := findLogGroups(ctx, conn, input, filter, tfslices.WithReturnFirstMatch) +func findLogGroup(ctx context.Context, conn *cloudwatchlogs.Client, input *cloudwatchlogs.DescribeLogGroupsInput, filter tfslices.Predicate[*awstypes.LogGroup], optFns ...tfslices.FinderOptionsFunc) (*awstypes.LogGroup, error) { + opts := tfslices.NewFinderOptions(optFns) + var output []awstypes.LogGroup + for value, err := range listLogGroups(ctx, conn, input, filter) { + if err != nil { + return nil, err + } - if err != nil { - return nil, err + output = append(output, value) + if opts.ReturnFirstMatch() { + break + } } return tfresource.AssertSingleValueResult(output) } -func findLogGroups(ctx context.Context, conn *cloudwatchlogs.Client, input *cloudwatchlogs.DescribeLogGroupsInput, filter tfslices.Predicate[*awstypes.LogGroup], optFns ...tfslices.FinderOptionsFunc) ([]awstypes.LogGroup, error) { - var output []awstypes.LogGroup - opts := tfslices.NewFinderOptions(optFns) +func listLogGroups(ctx context.Context, conn *cloudwatchlogs.Client, input *cloudwatchlogs.DescribeLogGroupsInput, filter tfslices.Predicate[*awstypes.LogGroup]) iter.Seq2[awstypes.LogGroup, error] { + return func(yield func(awstypes.LogGroup, error) bool) { + pages := cloudwatchlogs.NewDescribeLogGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + yield(awstypes.LogGroup{}, 
fmt.Errorf("listing CloudWatch Logs Log Groups: %w", err)) + return + } - pages := cloudwatchlogs.NewDescribeLogGroupsPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) + for _, v := range page.LogGroups { + if filter(&v) { + if !yield(v, nil) { + return + } + } + } + } + } +} - if err != nil { - return nil, err +var _ inttypes.ListResourceForSDK = &logGroupListResource{} + +type logGroupListResource struct { + framework.ResourceWithConfigure + framework.ListResourceWithSDKv2Resource + framework.ListResourceWithSDKv2Tags +} + +type logGroupListResourceModel struct { + framework.WithRegionModel +} + +// ListResourceConfigSchema defines the schema for the List configuration +// might be able to intercept or wrap this for simplicity +func (l *logGroupListResource) ListResourceConfigSchema(ctx context.Context, request list.ListResourceSchemaRequest, response *list.ListResourceSchemaResponse) { + response.Schema = listschema.Schema{ + Attributes: map[string]listschema.Attribute{}, + } +} + +func (l *logGroupListResource) List(ctx context.Context, request list.ListRequest, stream *list.ListResultsStream) { + awsClient := l.Meta() + conn := awsClient.LogsClient(ctx) + + var query logGroupListResourceModel + if request.Config.Raw.IsKnown() && !request.Config.Raw.IsNull() { + if diags := request.Config.Get(ctx, &query); diags.HasError() { + stream.Results = list.ListResultsStreamDiagnostics(diags) + return } + } - for _, v := range page.LogGroups { - if filter(&v) { - output = append(output, v) - if opts.ReturnFirstMatch() { - return output, nil - } + stream.Results = func(yield func(list.ListResult) bool) { + result := request.NewListResult(ctx) + var input cloudwatchlogs.DescribeLogGroupsInput + for output, err := range listLogGroups(ctx, conn, &input, tfslices.PredicateTrue[*awstypes.LogGroup]()) { + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + rd := l.ResourceData() + 
rd.SetId(aws.ToString(output.LogGroupName)) + resourceGroupFlatten(ctx, rd, output) + + // set tags + err = l.SetTags(ctx, awsClient, rd) + if err != nil { + result = fwdiag.NewListResultErrorDiagnostic(err) + yield(result) + return + } + + result.DisplayName = aws.ToString(output.LogGroupName) + + l.SetResult(ctx, awsClient, request.IncludeResource, &result, rd) + if result.Diagnostics.HasError() { + yield(result) + return + } + + if !yield(result) { + return } } } +} - return output, nil +func resourceGroupFlatten(_ context.Context, d *schema.ResourceData, lg awstypes.LogGroup) { + d.Set(names.AttrARN, trimLogGroupARNWildcardSuffix(aws.ToString(lg.Arn))) + d.Set(names.AttrKMSKeyID, lg.KmsKeyId) + d.Set("log_group_class", lg.LogGroupClass) + d.Set(names.AttrName, lg.LogGroupName) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(lg.LogGroupName))) + d.Set("retention_in_days", lg.RetentionInDays) + // Support in-place update of non-refreshable attribute. + d.Set(names.AttrSkipDestroy, d.Get(names.AttrSkipDestroy)) } diff --git a/internal/service/logs/group_list_test.go b/internal/service/logs/group_list_test.go new file mode 100644 index 000000000000..5b89ef32d70f --- /dev/null +++ b/internal/service/logs/group_list_test.go @@ -0,0 +1,146 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logs_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/querycheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfknownvalue "github.com/hashicorp/terraform-provider-aws/internal/acctest/knownvalue" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccLogsLogGroup_List_Basic(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_cloudwatch_log_group.test[0]" + resourceName2 := "aws_cloudwatch_log_group.test[1]" + resourceName3 := "aws_cloudwatch_log_group.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + CheckDestroy: testAccCheckLogGroupDestroy(ctx, t), + Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LogGroup/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("logs", "log-group:"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), 
tfknownvalue.RegionalARNExact("logs", "log-group:"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNExact("logs", "log-group:"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LogGroup/list_basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.StringExact(rName + "-0"), + }), + + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.StringExact(rName + "-1"), + }), + + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.Region()), + names.AttrName: knownvalue.StringExact(rName + "-2"), + }), + }, + }, + }, + }) +} + +func TestAccLogsLogGroup_List_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName1 := "aws_cloudwatch_log_group.test[0]" + resourceName2 := "aws_cloudwatch_log_group.test[1]" + resourceName3 := "aws_cloudwatch_log_group.test[2]" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + CheckDestroy: testAccCheckLogGroupDestroy(ctx, t), + 
Steps: []resource.TestStep{ + // Step 1: Setup + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LogGroup/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName1, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("logs", "log-group:"+rName+"-0")), + statecheck.ExpectKnownValue(resourceName2, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("logs", "log-group:"+rName+"-1")), + statecheck.ExpectKnownValue(resourceName3, tfjsonpath.New(names.AttrARN), tfknownvalue.RegionalARNAlternateRegionExact("logs", "log-group:"+rName+"-2")), + }, + }, + + // Step 2: Query + { + Query: true, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/LogGroup/list_region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + QueryResultChecks: []querycheck.QueryResultCheck{ + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.StringExact(rName + "-0"), + }), + + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.StringExact(rName + "-1"), + }), + + querycheck.ExpectIdentity("aws_cloudwatch_log_group.test", map[string]knownvalue.Check{ + names.AttrAccountID: tfknownvalue.AccountID(), + names.AttrRegion: 
knownvalue.StringExact(acctest.AlternateRegion()), + names.AttrName: knownvalue.StringExact(rName + "-2"), + }), + }, + }, + }, + }) +} diff --git a/internal/service/logs/group_test.go b/internal/service/logs/group_test.go index 3474e9375989..982d44c8f2ad 100644 --- a/internal/service/logs/group_test.go +++ b/internal/service/logs/group_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccLogsGroup_basic(t *testing.T) { +func TestAccLogsLogGroup_basic(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -41,7 +41,7 @@ func TestAccLogsGroup_basic(t *testing.T) { Config: testAccGroupConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLogGroupExists(ctx, t, resourceName, &v), - acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "logs", fmt.Sprintf("log-group:%s", rName)), + acctest.CheckResourceAttrRegionalARNFormat(ctx, resourceName, names.AttrARN, "logs", "log-group:{name}"), resource.TestCheckResourceAttr(resourceName, names.AttrKMSKeyID, ""), resource.TestCheckResourceAttr(resourceName, "log_group_class", expectedLogGroupClass), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -60,7 +60,7 @@ func TestAccLogsGroup_basic(t *testing.T) { }) } -func TestAccLogsGroup_nameGenerate(t *testing.T) { +func TestAccLogsLogGroup_nameGenerate(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup resourceName := "aws_cloudwatch_log_group.test" @@ -88,7 +88,7 @@ func TestAccLogsGroup_nameGenerate(t *testing.T) { }) } -func TestAccLogsGroup_namePrefix(t *testing.T) { +func TestAccLogsLogGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup resourceName := "aws_cloudwatch_log_group.test" @@ -116,7 +116,7 @@ func TestAccLogsGroup_namePrefix(t *testing.T) { }) } -func TestAccLogsGroup_disappears(t *testing.T) { +func TestAccLogsLogGroup_disappears(t 
*testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -140,7 +140,7 @@ func TestAccLogsGroup_disappears(t *testing.T) { }) } -func TestAccLogsGroup_kmsKey(t *testing.T) { +func TestAccLogsLogGroup_kmsKey(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -184,7 +184,7 @@ func TestAccLogsGroup_kmsKey(t *testing.T) { }) } -func TestAccLogsGroup_logGroupClass(t *testing.T) { +func TestAccLogsLogGroup_logGroupClass(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -211,7 +211,7 @@ func TestAccLogsGroup_logGroupClass(t *testing.T) { }) } -func TestAccLogsGroup_retentionPolicy(t *testing.T) { +func TestAccLogsLogGroup_retentionPolicy(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -253,7 +253,7 @@ func TestAccLogsGroup_retentionPolicy(t *testing.T) { }) } -func TestAccLogsGroup_multiple(t *testing.T) { +func TestAccLogsLogGroup_multiple(t *testing.T) { ctx := acctest.Context(t) var v1, v2, v3 types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -279,7 +279,7 @@ func TestAccLogsGroup_multiple(t *testing.T) { }) } -func TestAccLogsGroup_skipDestroy(t *testing.T) { +func TestAccLogsLogGroup_skipDestroy(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -302,7 +302,7 @@ func TestAccLogsGroup_skipDestroy(t *testing.T) { }) } -func TestAccLogsGroup_skipDestroyInconsistentPlan(t *testing.T) { +func TestAccLogsLogGroup_skipDestroyInconsistentPlan(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -334,7 +334,7 @@ func TestAccLogsGroup_skipDestroyInconsistentPlan(t *testing.T) { // Test whether the log group 
is successfully created with the DELIVERY log group class when retention_in_days is set. // Even if retention_in_days is changed in the configuration, the diff should be suppressed and the plan should be empty. -func TestAccLogsGroup_logGroupClassDELIVERY1(t *testing.T) { +func TestAccLogsLogGroup_logGroupClassDELIVERY1(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) @@ -375,7 +375,7 @@ func TestAccLogsGroup_logGroupClassDELIVERY1(t *testing.T) { } // Test whether the log group is successfully created with the DELIVERY log group class when retention_in_days is not set. -func TestAccLogsGroup_logGroupClassDELIVERY2(t *testing.T) { +func TestAccLogsLogGroup_logGroupClassDELIVERY2(t *testing.T) { ctx := acctest.Context(t) var v types.LogGroup rName := acctest.RandomWithPrefix(t, acctest.ResourcePrefix) diff --git a/internal/service/logs/groups_data_source.go b/internal/service/logs/groups_data_source.go index e1569aba14dc..f21342cb3526 100644 --- a/internal/service/logs/groups_data_source.go +++ b/internal/service/logs/groups_data_source.go @@ -50,17 +50,15 @@ func dataSourceGroupsRead(ctx context.Context, d *schema.ResourceData, meta any) input.LogGroupNamePrefix = aws.String(v.(string)) } - output, err := findLogGroups(ctx, conn, &input, tfslices.PredicateTrue[*awstypes.LogGroup]()) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading CloudWatch Log Groups: %s", err) - } - d.SetId(meta.(*conns.AWSClient).Region(ctx)) var arns, logGroupNames []string - for _, v := range output { - arns = append(arns, trimLogGroupARNWildcardSuffix(aws.ToString(v.Arn))) - logGroupNames = append(logGroupNames, aws.ToString(v.LogGroupName)) + for output, err := range listLogGroups(ctx, conn, &input, tfslices.PredicateTrue[*awstypes.LogGroup]()) { + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading CloudWatch Log Groups: %s", err) + } + + arns = append(arns, 
trimLogGroupARNWildcardSuffix(aws.ToString(output.Arn))) + logGroupNames = append(logGroupNames, aws.ToString(output.LogGroupName)) } d.Set(names.AttrARNs, arns) d.Set("log_group_names", logGroupNames) diff --git a/internal/service/logs/service_package_gen.go b/internal/service/logs/service_package_gen.go index d2150587f372..91e1c1b31d1a 100644 --- a/internal/service/logs/service_package_gen.go +++ b/internal/service/logs/service_package_gen.go @@ -4,6 +4,8 @@ package logs import ( "context" + "iter" + "slices" "unique" "github.com/aws/aws-sdk-go-v2/aws" @@ -175,6 +177,21 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa } } +func (p *servicePackage) SDKListResources(ctx context.Context) iter.Seq[*inttypes.ServicePackageSDKListResource] { + return slices.Values([]*inttypes.ServicePackageSDKListResource{ + { + Factory: logGroupResourceAsListResource, + TypeName: "aws_cloudwatch_log_group", + Name: "Log Group", + Region: unique.Make(inttypes.ResourceRegionDefault()), + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Identity: inttypes.RegionalSingleParameterIdentity(names.AttrName), + }, + }) +} + func (p *servicePackage) ServicePackageName() string { return names.Logs } diff --git a/internal/service/logs/testdata/LogGroup/list_basic/main.tf b/internal/service/logs/testdata/LogGroup/list_basic/main.tf new file mode 100644 index 000000000000..9940e2af0f4a --- /dev/null +++ b/internal/service/logs/testdata/LogGroup/list_basic/main.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_cloudwatch_log_group" "test" { + count = 3 + + name = "${var.rName}-${count.index}" + + retention_in_days = 1 +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} diff --git a/internal/service/logs/testdata/LogGroup/list_basic/main.tfquery.hcl b/internal/service/logs/testdata/LogGroup/list_basic/main.tfquery.hcl new file mode 100644 index 000000000000..a43f8cba1786 --- /dev/null +++ b/internal/service/logs/testdata/LogGroup/list_basic/main.tfquery.hcl @@ -0,0 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +list "aws_cloudwatch_log_group" "test" { + provider = aws +} diff --git a/internal/service/logs/testdata/LogGroup/list_region_override/main.tf b/internal/service/logs/testdata/LogGroup/list_region_override/main.tf new file mode 100644 index 000000000000..158832cffddd --- /dev/null +++ b/internal/service/logs/testdata/LogGroup/list_region_override/main.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" {} + +resource "aws_cloudwatch_log_group" "test" { + count = 3 + + region = var.region + + name = "${var.rName}-${count.index}" + + retention_in_days = 1 +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "region" { + description = "Region to deploy resource in" + type = string + nullable = false +} diff --git a/internal/service/logs/testdata/LogGroup/list_region_override/main.tfquery.hcl b/internal/service/logs/testdata/LogGroup/list_region_override/main.tfquery.hcl new file mode 100644 index 000000000000..8a50ec9defca --- /dev/null +++ b/internal/service/logs/testdata/LogGroup/list_region_override/main.tfquery.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +list "aws_cloudwatch_log_group" "test" { + provider = aws + + config { + region = var.region + } +} diff --git a/internal/service/odb/cloud_vm_cluster.go b/internal/service/odb/cloud_vm_cluster.go new file mode 100644 index 000000000000..52f92f21488e --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster.go @@ -0,0 +1,623 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + "errors" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_odb_cloud_vm_cluster", name="Cloud Vm Cluster") +// @Tags(identifierAttribute="arn") +func newResourceCloudVmCluster(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceCloudVmCluster{} + + r.SetDefaultCreateTimeout(24 * time.Hour) + r.SetDefaultUpdateTimeout(24 * time.Hour) + r.SetDefaultDeleteTimeout(24 * time.Hour) + + return r, nil +} + +const ( + ResNameCloudVmCluster = "Cloud Vm Cluster" +) + +var ResourceCloudVmCluster = newResourceCloudVmCluster + +type resourceCloudVmCluster struct { + framework.ResourceWithModel[cloudVmClusterResourceModel] + framework.WithTimeouts + framework.WithImportByID +} + +func (r *resourceCloudVmCluster) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + licenseModelType := fwtypes.StringEnumType[odbtypes.LicenseModel]() + diskRedundancyType := fwtypes.StringEnumType[odbtypes.DiskRedundancy]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "cloud_exadata_infrastructure_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The unique identifier of the 
Exadata infrastructure for this VM cluster. Changing this will create a new resource.", + }, + names.AttrClusterName: schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource.", + }, + "cpu_core_count": schema.Int32Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + }, + Description: "The number of CPU cores to enable on the VM cluster. Changing this will create a new resource.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.RequiresReplace(), + float64planmodifier.UseStateForUnknown(), + }, + Description: "The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource.", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource.", + }, + "db_servers": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Required: true, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Description: "The list of database servers for the VM cluster. 
Changing this will create a new resource.", + }, + "disk_redundancy": schema.StringAttribute{ + CustomType: diskRedundancyType, + Computed: true, + Description: "The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way).", + }, + names.AttrDisplayName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + Description: "A user-friendly name for the VM cluster. This member is required. Changing this will create a new resource.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + Description: "The domain name associated with the VM cluster.", + }, + "gi_version": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 This member is required. Changing this will create a new resource.", + }, + //Underlying API treats Hostname as hostname prefix. Therefore, explicitly setting it. API also returns new hostname prefix by appending the input hostname + //prefix. Therefore, we have hostname_prefix and hostname_prefix_computed + "hostname_prefix_computed": schema.StringAttribute{ + Computed: true, + Description: "The host name for the VM cluster. Constraints: - Can't be \"localhost\" or \"hostname\". - Can't contain \"-version\". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. " + + "This member is required. Changing this will create a new resource.", + }, + "hostname_prefix": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The host name prefix for the VM cluster. Constraints: - Can't be \"localhost\" or \"hostname\". 
- Can't contain \"-version\". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. " + + "This member is required. Changing this will create a new resource.", + }, + "iorm_config_cache": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudVMCExadataIormConfigResourceModel](ctx), + Description: "The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster.", + }, + "is_local_backup_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + Description: "Specifies whether to enable database backups to local Exadata storage for the VM cluster. Changing this will create a new resource.", + }, + "is_sparse_diskgroup_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + Description: "Specifies whether to create a sparse disk group for the VM cluster. Changing this will create a new resource.", + }, + "last_update_history_entry_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the most recent maintenance update history entry.", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModelType, + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. 
Changing this will create a new resource.", + }, + "listener_port": schema.Int32Attribute{ + Computed: true, + Description: "The listener port number configured on the VM cluster.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource.", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + Description: "The total number of nodes in the VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID (Oracle Cloud Identifier) of the VM cluster.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI resource anchor associated with the VM cluster.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + Description: "The HTTPS link to the VM cluster resource in OCI.", + }, + "odb_network_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Description: "The unique identifier of the ODB network for the VM cluster. This member is required. 
Changing this will create a new resource.", + }, + "percent_progress": schema.Float32Attribute{ + Computed: true, + Description: "The percentage of progress made on the current operation for the VM cluster.", + }, + "scan_dns_name": schema.StringAttribute{ + Computed: true, + Description: "The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster.", + }, + "scan_dns_record_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the DNS record for the SCAN IPs linked to the VM cluster.", + }, + "scan_ip_ids": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + Description: "The list of OCIDs for SCAN IP addresses associated with the VM cluster.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The hardware model name of the Exadata infrastructure running the VM cluster.", + }, + "ssh_public_keys": schema.SetAttribute{ + Required: true, + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + PlanModifiers: []planmodifier.Set{ + setplanmodifier.RequiresReplace(), + }, + Description: "The public key portion of one or more key pairs used for SSH access to the VM cluster. This member is required. 
Changing this will create a new resource.", + }, + names.AttrStatus: schema.StringAttribute{ + Computed: true, + CustomType: statusType, + Description: "The current lifecycle status of the VM cluster.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information regarding the current status of the VM cluster.", + }, + "storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The local node storage allocated to the VM cluster, in gigabytes (GB).", + }, + "system_version": schema.StringAttribute{ + Computed: true, + Description: "The operating system version of the image chosen for the VM cluster.", + }, + "scan_listener_port_tcp": schema.Int32Attribute{ + Computed: true, + Optional: true, + PlanModifiers: []planmodifier.Int32{ + int32planmodifier.RequiresReplace(), + int32planmodifier.UseStateForUnknown(), + }, + Description: "The port number for TCP connections to the single client access name (SCAN) listener. " + + "Valid values: 1024–8999 with the following exceptions: 2484 , 6100 , 6200 , 7060, 7070 , 7085 , and 7879Default: 1521. " + + "Changing this will create a new resource.", + }, + "timezone": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + stringplanmodifier.UseStateForUnknown(), + }, + Description: "The configured time zone of the VM cluster. Changing this will create a new resource.", + }, + "vip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The virtual IP (VIP) addresses assigned to the VM cluster. 
CRS assigns one VIP per node for failover support.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The timestamp when the VM cluster was created.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + Description: "The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading.", + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + "data_collection_options": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[cloudVMCDataCollectionOptionsResourceModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + Description: "The set of preferences for the various diagnostic collection options for the VM cluster. 
Changing this will create a new resource.", + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "is_diagnostics_events_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "is_health_monitoring_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + "is_incident_logs_enabled": schema.BoolAttribute{ + Required: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + }, + } +} + +func (r *resourceCloudVmCluster) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().ODBClient(ctx) + var plan cloudVmClusterResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + input := odb.CreateCloudVmClusterInput{ + Tags: getTagsIn(ctx), + //Underlying API treats Hostname as hostname prefix. + Hostname: plan.HostnamePrefix.ValueStringPointer(), + } + resp.Diagnostics.Append(flex.Expand(ctx, plan, &input)...) 
+ if resp.Diagnostics.HasError() { + return + } + out, err := conn.CreateCloudVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + if out == nil || out.CloudVmClusterId == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionCreating, ResNameCloudVmCluster, plan.DisplayName.ValueString(), nil), + errors.New("empty output").Error(), + ) + return + } + + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + createdVmCluster, err := waitCloudVmClusterCreated(ctx, conn, *out.CloudVmClusterId, createTimeout) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(out.CloudVmClusterId))...) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForCreation, ResNameCloudVmCluster, plan.DisplayName.ValueString(), err), + err.Error(), + ) + return + } + hostnamePrefix := strings.Split(*input.Hostname, "-")[0] + plan.HostnamePrefix = types.StringValue(hostnamePrefix) + plan.HostnamePrefixComputed = types.StringValue(*createdVmCluster.Hostname) + //scan listener port not returned by API directly + plan.ScanListenerPortTcp = types.Int32PointerValue(createdVmCluster.ListenerPort) + resp.Diagnostics.Append(flex.Flatten(ctx, createdVmCluster, &plan)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceCloudVmCluster) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().ODBClient(ctx) + var state cloudVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := FindCloudVmClusterForResourceByID(ctx, conn, state.CloudVmClusterId.ValueString()) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + hostnamePrefix := strings.Split(*out.Hostname, "-")[0] + state.HostnamePrefix = types.StringValue(hostnamePrefix) + state.HostnamePrefixComputed = types.StringValue(*out.Hostname) + //scan listener port not returned by API directly + state.ScanListenerPortTcp = types.Int32PointerValue(out.ListenerPort) + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceCloudVmCluster) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().ODBClient(ctx) + var state cloudVmClusterResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + input := odb.DeleteCloudVmClusterInput{ + CloudVmClusterId: state.CloudVmClusterId.ValueStringPointer(), + } + _, err := conn.DeleteCloudVmCluster(ctx, &input) + if err != nil { + if errs.IsA[*odbtypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionDeleting, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitCloudVmClusterDeleted(ctx, conn, state.CloudVmClusterId.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionWaitingForDeletion, ResNameCloudVmCluster, state.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } +} + +func waitCloudVmClusterCreated(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusProvisioning), + Target: enum.Slice(odbtypes.ResourceStatusAvailable, odbtypes.ResourceStatusFailed), + Refresh: statusCloudVmCluster(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudVmCluster); ok { + return out, err + } + + return nil, err +} + +func waitCloudVmClusterDeleted(ctx context.Context, conn *odb.Client, id string, timeout time.Duration) (*odbtypes.CloudVmCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(odbtypes.ResourceStatusTerminating), + Target: []string{}, + Refresh: statusCloudVmCluster(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*odbtypes.CloudVmCluster); ok { + return out, err + } + + return nil, err +} + 
// statusCloudVmCluster returns a retry.StateRefreshFunc that polls the VM
// cluster and reports its current lifecycle status to the create/delete
// waiters defined earlier in this file.
func statusCloudVmCluster(ctx context.Context, conn *odb.Client, id string) retry.StateRefreshFunc {
	return func() (any, string, error) {
		out, err := FindCloudVmClusterForResourceByID(ctx, conn, id)
		if tfresource.NotFound(err) {
			// An empty status string signals "resource absent" to StateChangeConf.
			return nil, "", nil
		}

		if err != nil {
			return nil, "", err
		}

		return out, string(out.Status), nil
	}
}

// FindCloudVmClusterForResourceByID retrieves a single Cloud VM cluster by its
// ID. An API ResourceNotFoundException is converted into *retry.NotFoundError
// so callers can detect absence with tfresource.NotFound.
func FindCloudVmClusterForResourceByID(ctx context.Context, conn *odb.Client, id string) (*odbtypes.CloudVmCluster, error) {
	input := odb.GetCloudVmClusterInput{
		CloudVmClusterId: aws.String(id),
	}
	out, err := conn.GetCloudVmCluster(ctx, &input)
	if err != nil {
		if errs.IsA[*odbtypes.ResourceNotFoundException](err) {
			return nil, &retry.NotFoundError{
				LastError:   err,
				LastRequest: &input,
			}
		}
		return nil, err
	}

	if out == nil || out.CloudVmCluster == nil {
		// Defensive: report a nil payload as an empty result instead of
		// letting callers dereference it.
		return nil, tfresource.NewEmptyResultError(&input)
	}
	return out.CloudVmCluster, nil
}

// cloudVmClusterResourceModel is the Terraform state model for the
// aws_odb_cloud_vm_cluster resource; tfsdk tags map fields to schema
// attribute names.
type cloudVmClusterResourceModel struct {
	framework.WithRegionModel
	CloudVmClusterArn            types.String                                                                `tfsdk:"arn"`
	CloudExadataInfrastructureId types.String                                                                `tfsdk:"cloud_exadata_infrastructure_id"`
	CloudVmClusterId             types.String                                                                `tfsdk:"id"`
	ClusterName                  types.String                                                                `tfsdk:"cluster_name"`
	CpuCoreCount                 types.Int32                                                                 `tfsdk:"cpu_core_count"`
	DataCollectionOptions        fwtypes.ListNestedObjectValueOf[cloudVMCDataCollectionOptionsResourceModel] `tfsdk:"data_collection_options"`
	DataStorageSizeInTBs         types.Float64                                                               `tfsdk:"data_storage_size_in_tbs"`
	DbNodeStorageSizeInGBs       types.Int32                                                                 `tfsdk:"db_node_storage_size_in_gbs"`
	DbServers                    fwtypes.SetValueOf[types.String]                                            `tfsdk:"db_servers"`
	DiskRedundancy               fwtypes.StringEnum[odbtypes.DiskRedundancy]                                 `tfsdk:"disk_redundancy"`
	DisplayName                  types.String                                                                `tfsdk:"display_name"`
	Domain                       types.String                                                                `tfsdk:"domain"`
	GiVersion                    types.String                                                                `tfsdk:"gi_version"`
	// ",noflatten": excluded from AutoFlex flattening; set explicitly in CRUD
	// code (not visible in this chunk).
	HostnamePrefixComputed types.String `tfsdk:"hostname_prefix_computed" autoflex:",noflatten"`
	// "-": ignored by AutoFlex entirely (configuration-only input).
	HostnamePrefix           types.String                                                            `tfsdk:"hostname_prefix" autoflex:"-"`
	IormConfigCache          fwtypes.ListNestedObjectValueOf[cloudVMCExadataIormConfigResourceModel] `tfsdk:"iorm_config_cache"`
	IsLocalBackupEnabled     types.Bool                                                              `tfsdk:"is_local_backup_enabled"`
	IsSparseDiskGroupEnabled types.Bool                                                              `tfsdk:"is_sparse_diskgroup_enabled"`
	LastUpdateHistoryEntryId types.String                                                            `tfsdk:"last_update_history_entry_id"`
	LicenseModel             fwtypes.StringEnum[odbtypes.LicenseModel]                               `tfsdk:"license_model"`
	ListenerPort             types.Int32                                                             `tfsdk:"listener_port"`
	MemorySizeInGbs          types.Int32                                                             `tfsdk:"memory_size_in_gbs"`
	NodeCount                types.Int32                                                             `tfsdk:"node_count"`
	Ocid                     types.String                                                            `tfsdk:"ocid"`
	OciResourceAnchorName    types.String                                                            `tfsdk:"oci_resource_anchor_name"`
	OciUrl                   types.String                                                            `tfsdk:"oci_url"`
	OdbNetworkId             types.String                                                            `tfsdk:"odb_network_id"`
	PercentProgress          types.Float32                                                           `tfsdk:"percent_progress"`
	ScanDnsName              types.String                                                            `tfsdk:"scan_dns_name"`
	ScanDnsRecordId          types.String                                                            `tfsdk:"scan_dns_record_id"`
	ScanIpIds                fwtypes.ListValueOf[types.String]                                       `tfsdk:"scan_ip_ids"`
	Shape                    types.String                                                            `tfsdk:"shape"`
	SshPublicKeys            fwtypes.SetValueOf[types.String]                                        `tfsdk:"ssh_public_keys"`
	Status                   fwtypes.StringEnum[odbtypes.ResourceStatus]                             `tfsdk:"status"`
	StatusReason             types.String                                                            `tfsdk:"status_reason"`
	StorageSizeInGBs         types.Int32                                                             `tfsdk:"storage_size_in_gbs"`
	SystemVersion            types.String                                                            `tfsdk:"system_version"`
	Timeouts                 timeouts.Value                                                          `tfsdk:"timeouts"`
	Timezone                 types.String                                                            `tfsdk:"timezone"`
	VipIds                   fwtypes.ListValueOf[types.String]                                       `tfsdk:"vip_ids"`
	CreatedAt                timetypes.RFC3339                                                       `tfsdk:"created_at"`
	ComputeModel             fwtypes.StringEnum[odbtypes.ComputeModel]                               `tfsdk:"compute_model"`
	// ",noflatten": excluded from AutoFlex flattening; set explicitly in CRUD
	// code (not visible in this chunk).
	ScanListenerPortTcp types.Int32 `tfsdk:"scan_listener_port_tcp" autoflex:",noflatten"`
	Tags                tftags.Map  `tfsdk:"tags"`
	TagsAll             tftags.Map  `tfsdk:"tags_all"`
}

// cloudVMCDataCollectionOptionsResourceModel models the nested
// data_collection_options block.
type cloudVMCDataCollectionOptionsResourceModel struct {
	IsDiagnosticsEventsEnabled types.Bool `tfsdk:"is_diagnostics_events_enabled"`
	IsHealthMonitoringEnabled  types.Bool `tfsdk:"is_health_monitoring_enabled"`
	IsIncidentLogsEnabled      types.Bool `tfsdk:"is_incident_logs_enabled"`
}

// cloudVMCExadataIormConfigResourceModel models the computed
// iorm_config_cache block.
type cloudVMCExadataIormConfigResourceModel struct {
	DbPlans          fwtypes.ListNestedObjectValueOf[cloudVMCDbIormConfigResourceModel] `tfsdk:"db_plans"`
	LifecycleDetails types.String                                                       `tfsdk:"lifecycle_details"`
	LifecycleState   fwtypes.StringEnum[odbtypes.IormLifecycleState]                    `tfsdk:"lifecycle_state"`
	Objective        fwtypes.StringEnum[odbtypes.Objective]                             `tfsdk:"objective"`
}

// cloudVMCDbIormConfigResourceModel models one per-database IORM plan entry
// inside iorm_config_cache.db_plans.
type cloudVMCDbIormConfigResourceModel struct {
	DbName          types.String `tfsdk:"db_name"`
	FlashCacheLimit types.String `tfsdk:"flash_cache_limit"`
	Share           types.Int32  `tfsdk:"share"`
}
diff --git a/internal/service/odb/cloud_vm_cluster_data_source.go b/internal/service/odb/cloud_vm_cluster_data_source.go
new file mode 100644
index 000000000000..41dea592991d
--- /dev/null
+++ b/internal/service/odb/cloud_vm_cluster_data_source.go
@@ -0,0 +1,317 @@
// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package odb + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_odb_cloud_vm_cluster", name="Cloud Vm Cluster") +// @Tags(identifierAttribute="arn") +func newDataSourceCloudVmCluster(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceCloudVmCluster{}, nil +} + +const ( + DSNameCloudVmCluster = "Cloud Vm Cluster Data Source" +) + +type dataSourceCloudVmCluster struct { + framework.DataSourceWithModel[dataSourceCloudVmClusterModel] +} + +func (d *dataSourceCloudVmCluster) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + statusType := fwtypes.StringEnumType[odbtypes.ResourceStatus]() + diskRedundancyType := fwtypes.StringEnumType[odbtypes.DiskRedundancy]() + licenseModelType := fwtypes.StringEnumType[odbtypes.LicenseModel]() + computeModelType := fwtypes.StringEnumType[odbtypes.ComputeModel]() + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: schema.StringAttribute{ + Required: true, + Description: "The unique identifier of the VM cluster.", + }, + "cloud_exadata_infrastructure_id": 
schema.StringAttribute{ + Computed: true, + Description: "The ID of the Cloud Exadata Infrastructure.", + }, + names.AttrClusterName: schema.StringAttribute{ + Computed: true, + Description: "The name of the Grid Infrastructure (GI) cluster.", + }, + "cpu_core_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of CPU cores enabled on the VM cluster.", + }, + "data_storage_size_in_tbs": schema.Float64Attribute{ + Computed: true, + Description: "The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster.", + }, + "db_node_storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster.", + }, + "db_servers": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The list of database servers for the VM cluster.", + }, + "disk_redundancy": schema.StringAttribute{ + CustomType: diskRedundancyType, + Computed: true, + Description: "The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. 
HIGH is 3-way redundancy.", + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + Description: "The display name of the VM cluster.", + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + Description: "The domain name of the VM cluster.", + }, + "gi_version": schema.StringAttribute{ + Computed: true, + Description: "he software version of the Oracle Grid Infrastructure (GI) for the VM cluster.", + }, + "hostname_prefix_computed": schema.StringAttribute{ + Computed: true, + Description: "The computed hostname prefix for the VM cluster.", + }, + "is_local_backup_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Indicates whether database backups to local Exadata storage is enabled for the VM cluster.", + }, + "is_sparse_disk_group_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Indicates whether the VM cluster is configured with a sparse disk group.", + }, + "last_update_history_entry_id": schema.StringAttribute{ + Computed: true, + Description: "The Oracle Cloud ID (OCID) of the last maintenance update history entry.", + }, + "license_model": schema.StringAttribute{ + CustomType: licenseModelType, + Computed: true, + Description: "The Oracle license model applied to the VM cluster.", + }, + "listener_port": schema.Int32Attribute{ + Computed: true, + Description: "The port number configured for the listener on the VM cluster.", + }, + "memory_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of memory, in gigabytes (GB), that's allocated for the VM cluster.", + }, + "node_count": schema.Int32Attribute{ + Computed: true, + Description: "The number of nodes in the VM cluster.", + }, + "ocid": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the VM cluster.", + }, + "oci_resource_anchor_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the OCI Resource Anchor.", + }, + "oci_url": schema.StringAttribute{ + Computed: true, + 
Description: "The HTTPS link to the VM cluster in OCI.", + }, + "odb_network_id": schema.StringAttribute{ + Computed: true, + Description: "The ID of the ODB network.", + }, + "percent_progress": schema.Float64Attribute{ + Computed: true, + Description: "The amount of progress made on the current operation on the VM cluster,expressed as a percentage.", + }, + "scan_dns_name": schema.StringAttribute{ + Computed: true, + Description: "The FQDN of the DNS record for the Single Client Access Name (SCAN) IP\n" + + " addresses that are associated with the VM cluster.", + }, + "scan_dns_record_id": schema.StringAttribute{ + Computed: true, + Description: "The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster.", + }, + "scan_ip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The OCID of the SCAN IP addresses that are associated with the VM cluster.", + }, + "shape": schema.StringAttribute{ + Computed: true, + Description: "The hardware model name of the Exadata infrastructure that's running the VM cluster.", + }, + "ssh_public_keys": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "he public key portion of one or more key pairs used for SSH access to the VM cluster.", + }, + names.AttrStatus: schema.StringAttribute{ + CustomType: statusType, + Computed: true, + Description: "The status of the VM cluster.", + }, + names.AttrStatusReason: schema.StringAttribute{ + Computed: true, + Description: "Additional information about the status of the VM cluster.", + }, + "storage_size_in_gbs": schema.Int32Attribute{ + Computed: true, + Description: "The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster.", + }, + "system_version": schema.StringAttribute{ + Computed: true, + Description: "The operating system version of the image chosen for the VM 
cluster.", + }, + "timezone": schema.StringAttribute{ + Computed: true, + Description: "The time zone of the VM cluster.", + }, + "vip_ids": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The virtual IP (VIP) addresses that are associated with the VM cluster.\n" + + "Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for\n" + + "each node in the VM cluster to enable failover. If one node fails, the VIP is\n" + + "reassigned to another active node in the cluster.", + }, + names.AttrCreatedAt: schema.StringAttribute{ + Computed: true, + CustomType: timetypes.RFC3339Type{}, + Description: "The time when the VM cluster was created.", + }, + "compute_model": schema.StringAttribute{ + CustomType: computeModelType, + Computed: true, + Description: "The OCI model compute model used when you create or clone an instance: ECPU or\n" + + "OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on\n" + + "the number of cores elastically allocated from a pool of compute and storage\n" + + "servers. An OCPU is a legacy physical measure of compute resources. 
OCPUs are\n" + + "based on the physical core of a processor with hyper-threading enabled.", + }, + "data_collection_options": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[dataCollectionOptionsVMCDataSourceModel](ctx), + Description: "The set of diagnostic collection options enabled for the VM cluster.", + }, + "iorm_config_cache": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.NewListNestedObjectTypeOf[exadataIormConfigVMCDataSourceModel](ctx), + Description: "The ExadataIormConfig cache details for the VM cluster.", + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (d *dataSourceCloudVmCluster) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().ODBClient(ctx) + var data dataSourceCloudVmClusterModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + input := odb.GetCloudVmClusterInput{ + CloudVmClusterId: data.CloudVmClusterId.ValueStringPointer(), + } + out, err := conn.GetCloudVmCluster(ctx, &input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.ODB, create.ErrActionReading, DSNameCloudVmCluster, data.CloudVmClusterId.ValueString(), err), + err.Error(), + ) + return + } + data.HostnamePrefixComputed = types.StringValue(*out.CloudVmCluster.Hostname) + resp.Diagnostics.Append(flex.Flatten(ctx, out.CloudVmCluster, &data)...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dataSourceCloudVmClusterModel struct { + framework.WithRegionModel + CloudExadataInfrastructureId types.String `tfsdk:"cloud_exadata_infrastructure_id"` + CloudVmClusterArn types.String `tfsdk:"arn"` + CloudVmClusterId types.String `tfsdk:"id"` + ClusterName types.String `tfsdk:"cluster_name"` + CpuCoreCount types.Int32 `tfsdk:"cpu_core_count"` + DataCollectionOptions fwtypes.ListNestedObjectValueOf[dataCollectionOptionsVMCDataSourceModel] `tfsdk:"data_collection_options"` + DataStorageSizeInTBs types.Float64 `tfsdk:"data_storage_size_in_tbs"` + DbNodeStorageSizeInGBs types.Int32 `tfsdk:"db_node_storage_size_in_gbs"` + DbServers fwtypes.ListValueOf[types.String] `tfsdk:"db_servers"` + DiskRedundancy fwtypes.StringEnum[odbtypes.DiskRedundancy] `tfsdk:"disk_redundancy"` + DisplayName types.String `tfsdk:"display_name"` + Domain types.String `tfsdk:"domain"` + GiVersion types.String `tfsdk:"gi_version"` + HostnamePrefixComputed types.String `tfsdk:"hostname_prefix_computed" autoflex:",noflatten"` + IormConfigCache fwtypes.ListNestedObjectValueOf[exadataIormConfigVMCDataSourceModel] `tfsdk:"iorm_config_cache"` + IsLocalBackupEnabled types.Bool `tfsdk:"is_local_backup_enabled"` + IsSparseDiskGroupEnabled types.Bool `tfsdk:"is_sparse_disk_group_enabled"` + LastUpdateHistoryEntryId types.String `tfsdk:"last_update_history_entry_id"` + LicenseModel fwtypes.StringEnum[odbtypes.LicenseModel] `tfsdk:"license_model"` + ListenerPort types.Int32 `tfsdk:"listener_port"` + MemorySizeInGbs types.Int32 `tfsdk:"memory_size_in_gbs"` + NodeCount types.Int32 `tfsdk:"node_count"` + Ocid types.String `tfsdk:"ocid"` + OciResourceAnchorName types.String `tfsdk:"oci_resource_anchor_name"` + OciUrl types.String `tfsdk:"oci_url"` + OdbNetworkId types.String `tfsdk:"odb_network_id"` + PercentProgress types.Float64 `tfsdk:"percent_progress"` + ScanDnsName types.String `tfsdk:"scan_dns_name"` + ScanDnsRecordId types.String `tfsdk:"scan_dns_record_id"` + ScanIpIds 
fwtypes.ListValueOf[types.String] `tfsdk:"scan_ip_ids"` + Shape types.String `tfsdk:"shape"` + SshPublicKeys fwtypes.ListValueOf[types.String] `tfsdk:"ssh_public_keys"` + Status fwtypes.StringEnum[odbtypes.ResourceStatus] `tfsdk:"status"` + StatusReason types.String `tfsdk:"status_reason"` + StorageSizeInGBs types.Int32 `tfsdk:"storage_size_in_gbs"` + SystemVersion types.String `tfsdk:"system_version"` + Timezone types.String `tfsdk:"timezone"` + VipIds fwtypes.ListValueOf[types.String] `tfsdk:"vip_ids"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + ComputeModel fwtypes.StringEnum[odbtypes.ComputeModel] `tfsdk:"compute_model"` + Tags tftags.Map `tfsdk:"tags"` +} + +type dataCollectionOptionsVMCDataSourceModel struct { + IsDiagnosticsEventsEnabled types.Bool `tfsdk:"is_diagnostics_events_enabled"` + IsHealthMonitoringEnabled types.Bool `tfsdk:"is_health_monitoring_enabled"` + IsIncidentLogsEnabled types.Bool `tfsdk:"is_incident_logs_enabled"` +} + +type exadataIormConfigVMCDataSourceModel struct { + DbPlans fwtypes.ListNestedObjectValueOf[dbIormConfigVMCDatasourceModel] `tfsdk:"db_plans"` + LifecycleDetails types.String `tfsdk:"lifecycle_details"` + LifecycleState fwtypes.StringEnum[odbtypes.IormLifecycleState] `tfsdk:"lifecycle_state"` + Objective fwtypes.StringEnum[odbtypes.Objective] `tfsdk:"objective"` +} + +type dbIormConfigVMCDatasourceModel struct { + DbName types.String `tfsdk:"db_name"` + FlashCacheLimit types.String `tfsdk:"flash_cache_limit"` + Share types.Int32 `tfsdk:"share"` +} diff --git a/internal/service/odb/cloud_vm_cluster_data_source_test.go b/internal/service/odb/cloud_vm_cluster_data_source_test.go new file mode 100644 index 000000000000..dcd72b9d8d5c --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster_data_source_test.go @@ -0,0 +1,188 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type cloudVmClusterDSTest struct { + vmClusterDisplayNamePrefix string + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string +} + +var vmClusterTestDS = cloudVmClusterDSTest{ + vmClusterDisplayNamePrefix: "Ofake-vmc", + exaInfraDisplayNamePrefix: "Ofake-exa-infra", + odbNetDisplayNamePrefix: "odb-net", +} + +func TestAccODBCloudVmClusterDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var cloudvmcluster odbtypes.CloudVmCluster + odbNetRName := sdkacctest.RandomWithPrefix(vmClusterTestDS.odbNetDisplayNamePrefix) + exaInfraRName := sdkacctest.RandomWithPrefix(vmClusterTestDS.exaInfraDisplayNamePrefix) + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestDS.vmClusterDisplayNamePrefix) + dataSourceName := "data.aws_odb_cloud_vm_cluster.test" + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestDS.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, 
names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestDS.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmClusterTestDS.cloudVMClusterConfig(odbNetRName, exaInfraRName, vmcDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestDS.testAccCheckCloudVmClusterExists(ctx, dataSourceName, &cloudvmcluster), + resource.TestCheckResourceAttr(dataSourceName, names.AttrDisplayName, vmcDisplayName), + ), + }, + }, + }) +} + +func (cloudVmClusterDSTest) testAccCheckCloudVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + _, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (cloudVmClusterDSTest) testAccCheckCloudVmClusterExists(ctx context.Context, name string, cloudvmcluster *odbtypes.CloudVmCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not found")) + } + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not set")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, 
rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + *cloudvmcluster = *resp + return nil + } +} + +func (cloudVmClusterDSTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudVmClustersInput{} + _, err := conn.ListCloudVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (cloudVmClusterDSTest) cloudVMClusterConfig(odbNet, exaInfra, displayName, sshKey string) string { + dsTfCodeVmCluster := fmt.Sprintf(` + + +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} + +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} + +data "aws_odb_db_servers_list" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers_list.test.db_servers : 
db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + } + +} + +data "aws_odb_cloud_vm_cluster" "test" { + id = aws_odb_cloud_vm_cluster.test.id +} +`, odbNet, exaInfra, displayName, sshKey) + return dsTfCodeVmCluster +} diff --git a/internal/service/odb/cloud_vm_cluster_test.go b/internal/service/odb/cloud_vm_cluster_test.go new file mode 100644 index 000000000000..118f1e298f59 --- /dev/null +++ b/internal/service/odb/cloud_vm_cluster_test.go @@ -0,0 +1,422 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package odb_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/odb" + odbtypes "github.com/aws/aws-sdk-go-v2/service/odb/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfodb "github.com/hashicorp/terraform-provider-aws/internal/service/odb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type cloudVmClusterResourceTest struct { + vmClusterDisplayNamePrefix string + exaInfraDisplayNamePrefix string + odbNetDisplayNamePrefix string +} + +var vmClusterTestEntity = cloudVmClusterResourceTest{ + vmClusterDisplayNamePrefix: "Ofake-vmc", + exaInfraDisplayNamePrefix: "Ofake-exa-infra", + odbNetDisplayNamePrefix: "odb-net", +} + +func TestAccODBCloudVmCluster_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping 
long-running test in short mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resourceName := "aws_odb_cloud_vm_cluster.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmcDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_allParams(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmcClusterDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resourceName := "aws_odb_cloud_vm_cluster.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
vmClusterTestEntity.cloudVmClusterWithAllParameters(vmcClusterDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_taggingTest(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster1 odbtypes.CloudVmCluster + var cloudvmcluster2 odbtypes.CloudVmCluster + vmcDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + resourceName := "aws_odb_cloud_vm_cluster.test" + + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmcDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + return nil + }), + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: vmClusterTestEntity.testAccCloudVmClusterConfigUpdatedTags(vmcDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), + resource.TestCheckResourceAttr(resourceName, "tags.env", "dev"), + 
resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster2), + resource.ComposeTestCheckFunc(func(state *terraform.State) error { + if strings.Compare(*(cloudvmcluster1.CloudVmClusterId), *(cloudvmcluster2.CloudVmClusterId)) != 0 { + return errors.New("Should not create a new cloud vm cluster for tag update") + } + return nil + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccODBCloudVmCluster_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var cloudvmcluster odbtypes.CloudVmCluster + vmClusterDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.vmClusterDisplayNamePrefix) + resourceName := "aws_odb_cloud_vm_cluster.test" + publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) + if err != nil { + t.Fatal(err) + return + } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + vmClusterTestEntity.testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ODBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: vmClusterTestEntity.testAccCheckCloudVmClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: vmClusterTestEntity.testAccCloudVmClusterConfigBasic(vmClusterDisplayName, publicKey), + Check: resource.ComposeAggregateTestCheckFunc( + vmClusterTestEntity.testAccCheckCloudVmClusterExists(ctx, resourceName, &cloudvmcluster), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfodb.ResourceCloudVmCluster, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func (cloudVmClusterResourceTest) testAccCheckCloudVmClusterDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_odb_cloud_vm_cluster" { + continue + } + _, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + return create.Error(names.ODB, create.ErrActionCheckingDestroyed, tfodb.ResNameCloudVmCluster, rs.Primary.ID, errors.New("not destroyed")) + } + return nil + } +} + +func (cloudVmClusterResourceTest) testAccCheckCloudVmClusterExists(ctx context.Context, name string, cloudvmcluster *odbtypes.CloudVmCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not found")) + } + if rs.Primary.ID == "" { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, name, errors.New("not set")) + } + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + resp, err := tfodb.FindCloudVmClusterForResourceByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.ODB, create.ErrActionCheckingExistence, tfodb.ResNameCloudVmCluster, rs.Primary.ID, err) + } + *cloudvmcluster = *resp + return nil + } +} + +func (cloudVmClusterResourceTest) testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).ODBClient(ctx) + input := odb.ListCloudVmClustersInput{} + _, err := conn.ListCloudVmClusters(ctx, &input) + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func (cloudVmClusterResourceTest) testAccCloudVmClusterConfigBasic(vmClusterDisplayName, 
sshKey string) string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.exaInfraDisplayNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + res := fmt.Sprintf(` + +%s + +%s + +data "aws_odb_db_servers_list" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers_list.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + return res +} +func (cloudVmClusterResourceTest) cloudVmClusterWithAllParameters(vmClusterDisplayName, sshKey string) string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.exaInfraDisplayNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + + res := fmt.Sprintf(` + +%s + +%s + + +data "aws_odb_db_servers_list" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + 
display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in data.aws_odb_db_servers_list.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + return res +} + +func (cloudVmClusterResourceTest) testAccCloudVmClusterConfigUpdatedTags(vmClusterDisplayName, sshKey string) string { + exaInfraDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.exaInfraDisplayNamePrefix) + odbNetDisplayName := sdkacctest.RandomWithPrefix(vmClusterTestEntity.odbNetDisplayNamePrefix) + exaInfra := vmClusterTestEntity.exaInfra(exaInfraDisplayName) + odbNet := vmClusterTestEntity.oracleDBNetwork(odbNetDisplayName) + res := fmt.Sprintf(` +%s + +%s + +data "aws_odb_db_servers_list" "test" { + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id +} + +resource "aws_odb_cloud_vm_cluster" "test" { + display_name = %[3]q + cloud_exadata_infrastructure_id = aws_odb_cloud_exadata_infrastructure.test.id + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["%[4]s"] + odb_network_id = aws_odb_network.test.id + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = [for db_server in 
data.aws_odb_db_servers_list.test.db_servers : db_server.id] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } + tags = { + "env" = "dev" + "foo" = "bar" + } + +} +`, exaInfra, odbNet, vmClusterDisplayName, sshKey) + return res +} + +func (cloudVmClusterResourceTest) exaInfra(rName string) string { + resource := fmt.Sprintf(` +resource "aws_odb_cloud_exadata_infrastructure" "test" { + display_name = %[1]q + shape = "Exadata.X9M" + storage_count = 3 + compute_count = 2 + availability_zone_id = "use1-az6" + maintenance_window { + custom_action_timeout_in_mins = 16 + is_custom_action_timeout_enabled = true + patching_mode = "ROLLING" + preference = "NO_PREFERENCE" + } +} +`, rName) + return resource +} + +func (cloudVmClusterResourceTest) oracleDBNetwork(rName string) string { + resource := fmt.Sprintf(` +resource "aws_odb_network" "test" { + display_name = %[1]q + availability_zone_id = "use1-az6" + client_subnet_cidr = "10.2.0.0/24" + backup_subnet_cidr = "10.2.1.0/24" + s3_access = "DISABLED" + zero_etl_access = "DISABLED" +} +`, rName) + return resource +} diff --git a/internal/service/odb/service_package_gen.go b/internal/service/odb/service_package_gen.go index ad718c464ae7..8495fcc150e5 100644 --- a/internal/service/odb/service_package_gen.go +++ b/internal/service/odb/service_package_gen.go @@ -37,6 +37,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newDataSourceCloudVmCluster, + TypeName: "aws_odb_cloud_vm_cluster", + Name: "Cloud Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newDataSourceNetwork, TypeName: "aws_odb_network", @@ -75,6 +84,15 @@ func 
(p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.Ser }), Region: unique.Make(inttypes.ResourceRegionDefault()), }, + { + Factory: newResourceCloudVmCluster, + TypeName: "aws_odb_cloud_vm_cluster", + Name: "Cloud Vm Cluster", + Tags: unique.Make(inttypes.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }), + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newResourceNetwork, TypeName: "aws_odb_network", diff --git a/internal/service/ses/send_email_action.go b/internal/service/ses/send_email_action.go new file mode 100644 index 000000000000..c084da17f860 --- /dev/null +++ b/internal/service/ses/send_email_action.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ses + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ses" + awstypes "github.com/aws/aws-sdk-go-v2/service/ses/types" + "github.com/hashicorp/terraform-plugin-framework/action" + "github.com/hashicorp/terraform-plugin-framework/action/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @Action(aws_ses_send_email, name="Send Email") +func newSendEmailAction(_ context.Context) (action.ActionWithConfigure, error) { + return &sendEmailAction{}, nil +} + +var ( + _ action.Action = (*sendEmailAction)(nil) +) + +type sendEmailAction struct { + framework.ActionWithModel[sendEmailActionModel] +} + +type sendEmailActionModel struct { + framework.WithRegionModel + Source types.String `tfsdk:"source"` + ToAddresses fwtypes.ListValueOf[types.String] `tfsdk:"to_addresses"` + CcAddresses 
fwtypes.ListValueOf[types.String] `tfsdk:"cc_addresses"` + BccAddresses fwtypes.ListValueOf[types.String] `tfsdk:"bcc_addresses"` + Subject types.String `tfsdk:"subject"` + TextBody types.String `tfsdk:"text_body"` + HtmlBody types.String `tfsdk:"html_body"` + ReplyToAddresses fwtypes.ListValueOf[types.String] `tfsdk:"reply_to_addresses"` + ReturnPath types.String `tfsdk:"return_path"` +} + +func (a *sendEmailAction) Schema(ctx context.Context, req action.SchemaRequest, resp *action.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting.", + Attributes: map[string]schema.Attribute{ + names.AttrSource: schema.StringAttribute{ + Description: "The email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.", + Required: true, + }, + "to_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The To: field(s) of the message.", + Optional: true, + }, + "cc_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The CC: field(s) of the message.", + Optional: true, + }, + "bcc_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The BCC: field(s) of the message.", + Optional: true, + }, + "subject": schema.StringAttribute{ + Description: "The subject of the message: A short summary of the content, which will appear in the recipient's inbox.", + Required: true, + }, + "text_body": schema.StringAttribute{ + Description: "The message body in text format. Either text_body or html_body must be specified.", + Optional: true, + }, + "html_body": schema.StringAttribute{ + Description: "The message body in HTML format. 
Either text_body or html_body must be specified.", + Optional: true, + }, + "reply_to_addresses": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Description: "The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.", + Optional: true, + }, + "return_path": schema.StringAttribute{ + Description: "The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled.", + Optional: true, + }, + }, + } +} + +func (a *sendEmailAction) Invoke(ctx context.Context, req action.InvokeRequest, resp *action.InvokeResponse) { + var config sendEmailActionModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) + if resp.Diagnostics.HasError() { + return + } + + // Validate that at least one body type is provided + if config.TextBody.IsNull() && config.HtmlBody.IsNull() { + resp.Diagnostics.AddError( + "Missing Email Body", + "Either text_body or html_body must be specified", + ) + return + } + + conn := a.Meta().SESClient(ctx) + + source := config.Source.ValueString() + subject := config.Subject.ValueString() + + tflog.Info(ctx, "Starting SES send email action", map[string]any{ + names.AttrSource: source, + "subject": subject, + "has_text_body": !config.TextBody.IsNull(), + "has_html_body": !config.HtmlBody.IsNull(), + }) + + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Sending email from %s...", source), + }) + + // Build destination + destination := &awstypes.Destination{} + if !config.ToAddresses.IsNull() { + destination.ToAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.ToAddresses) + } + if !config.CcAddresses.IsNull() { + destination.CcAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.CcAddresses) + } + if !config.BccAddresses.IsNull() { + destination.BccAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.BccAddresses) + } + + // Build message + 
message := &awstypes.Message{ + Subject: &awstypes.Content{ + Data: aws.String(subject), + }, + Body: &awstypes.Body{}, + } + + if !config.TextBody.IsNull() { + message.Body.Text = &awstypes.Content{ + Data: config.TextBody.ValueStringPointer(), + } + } + if !config.HtmlBody.IsNull() { + message.Body.Html = &awstypes.Content{ + Data: config.HtmlBody.ValueStringPointer(), + } + } + + // Build input + input := &ses.SendEmailInput{ + Source: aws.String(source), + Destination: destination, + Message: message, + } + + if !config.ReplyToAddresses.IsNull() { + input.ReplyToAddresses = fwflex.ExpandFrameworkStringValueList(ctx, config.ReplyToAddresses) + } + + if !config.ReturnPath.IsNull() { + input.ReturnPath = config.ReturnPath.ValueStringPointer() + } + + // Send email + output, err := conn.SendEmail(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + "Failed to Send Email", + fmt.Sprintf("Could not send email from %s: %s", source, err), + ) + return + } + + messageId := aws.ToString(output.MessageId) + resp.SendProgress(action.InvokeProgressEvent{ + Message: fmt.Sprintf("Email sent successfully (Message ID: %s)", messageId), + }) + + tflog.Info(ctx, "SES send email action completed successfully", map[string]any{ + names.AttrSource: source, + "message_id": messageId, + }) +} diff --git a/internal/service/ses/send_email_action_test.go b/internal/service/ses/send_email_action_test.go new file mode 100644 index 000000000000..5d938d0884c5 --- /dev/null +++ b/internal/service/ses/send_email_action_test.go @@ -0,0 +1,204 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ses_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ses" + awstypes "github.com/aws/aws-sdk-go-v2/service/ses/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSESSendEmailAction_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_basic(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +func TestAccSESSendEmailAction_htmlBody(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_htmlBody(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +func TestAccSESSendEmailAction_multipleRecipients(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + testEmail := acctest.SkipIfEnvVarNotSet(t, "SES_VERIFIED_EMAIL") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SESEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.SESServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_14_0), + }, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccSendEmailActionConfig_multipleRecipients(rName, testEmail), + Check: resource.ComposeTestCheckFunc( + testAccCheckSendEmailAction(ctx, testEmail), + ), + }, + }, + }) +} + +// testAccCheckSendEmailAction verifies the action can send emails +func testAccCheckSendEmailAction(ctx context.Context, sourceEmail string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SESClient(ctx) + + // Verify the source email is verified in SES + input := &ses.GetIdentityVerificationAttributesInput{ + Identities: []string{sourceEmail}, + } + + output, err := conn.GetIdentityVerificationAttributes(ctx, input) + if err != nil { + return fmt.Errorf("Failed to get identity verification attributes: %w", err) + } + + if attrs, ok := output.VerificationAttributes[sourceEmail]; ok { + if attrs.VerificationStatus != 
awstypes.VerificationStatusSuccess { + return fmt.Errorf("Email %s is not verified in SES (status: %s)", sourceEmail, string(attrs.VerificationStatus)) + } + } else { + return fmt.Errorf("Email %s not found in SES identities", sourceEmail) + } + + return nil + } +} + +// Configuration functions + +func testAccSendEmailActionConfig_basic(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "Test Email from %[1]s" + text_body = "This is a test email sent from Terraform action test." + to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} + +func testAccSendEmailActionConfig_htmlBody(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "HTML Test Email from %[1]s" + html_body = "

Test Email

This is a test email sent from Terraform action test.

" + to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} + +func testAccSendEmailActionConfig_multipleRecipients(rName, testEmail string) string { + return fmt.Sprintf(` +action "aws_ses_send_email" "test" { + config { + source = %[2]q + subject = "Multi-recipient Test Email from %[1]s" + text_body = "This is a test email sent to multiple recipients." + to_addresses = [%[2]q] + cc_addresses = [%[2]q] + reply_to_addresses = [%[2]q] + } +} + +resource "terraform_data" "trigger" { + input = "trigger" + lifecycle { + action_trigger { + events = [before_create] + actions = [action.aws_ses_send_email.test] + } + } +} +`, rName, testEmail) +} diff --git a/internal/service/ses/service_package_gen.go b/internal/service/ses/service_package_gen.go index 95d7fefaf004..92ab96308930 100644 --- a/internal/service/ses/service_package_gen.go +++ b/internal/service/ses/service_package_gen.go @@ -17,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newSendEmailAction, + TypeName: "aws_ses_send_email", + Name: "Send Email", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } diff --git a/internal/service/sfn/generate.go b/internal/service/sfn/generate.go index 5b3d8d3fe089..9bbaf20e5b13 100644 --- a/internal/service/sfn/generate.go +++ b/internal/service/sfn/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/listpages/main.go -ListOps=ListStateMachineVersions //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go 
run ../../generate/identitytests/main.go //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/sfn/service_package_gen.go b/internal/service/sfn/service_package_gen.go index 9442088cf575..d99a1cc1d9cc 100644 --- a/internal/service/sfn/service_package_gen.go +++ b/internal/service/sfn/service_package_gen.go @@ -79,6 +79,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*inttypes.ServicePa IdentifierAttribute: names.AttrID, }), Region: unique.Make(inttypes.ResourceRegionDefault()), + Identity: inttypes.RegionalARNIdentity( + inttypes.WithIdentityDuplicateAttrs(names.AttrID), + ), + Import: inttypes.SDKv2Import{ + WrappedImport: true, + }, }, } } diff --git a/internal/service/sfn/state_machine.go b/internal/service/sfn/state_machine.go index 917c67311a1b..9b9e10739b05 100644 --- a/internal/service/sfn/state_machine.go +++ b/internal/service/sfn/state_machine.go @@ -35,6 +35,9 @@ import ( // @SDKResource("aws_sfn_state_machine", name="State Machine") // @Tags(identifierAttribute="id") +// @ArnIdentity +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/sfn;sfn.DescribeStateMachineOutput") +// @Testing(preIdentityVersion="v6.13.0") func resourceStateMachine() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceStateMachineCreate, @@ -42,10 +45,6 @@ func resourceStateMachine() *schema.Resource { UpdateWithoutTimeout: resourceStateMachineUpdate, DeleteWithoutTimeout: resourceStateMachineDelete, - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(5 * time.Minute), Update: schema.DefaultTimeout(1 * time.Minute), diff --git a/internal/service/sfn/state_machine_identity_gen_test.go b/internal/service/sfn/state_machine_identity_gen_test.go new file mode 100644 index 000000000000..69b285ed95a5 
--- /dev/null +++ b/internal/service/sfn/state_machine_identity_gen_test.go @@ -0,0 +1,283 @@ +// Code generated by internal/generate/identitytests/main.go; DO NOT EDIT. + +package sfn_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/sfn" + "github.com/hashicorp/terraform-plugin-testing/compare" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfstatecheck "github.com/hashicorp/terraform-provider-aws/internal/acctest/statecheck" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSFNStateMachine_Identity_Basic(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineOutput + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckStateMachineDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &v), + ), + 
ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import block with Import ID + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + + // Step 4: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.Region())), + }, + }, + }, + }, + }) +} + +func TestAccSFNStateMachine_Identity_RegionOverride(t *testing.T) { + ctx := acctest.Context(t) + + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: acctest.CheckDestroyNoop, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + // Step 1: Setup + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrID), resourceName, tfjsonpath.New(names.AttrARN), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + + // Step 2: Import command with appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": 
config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 3: Import command without appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ImportStateKind: resource.ImportCommandWithID, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + + // Step 4: Import block with Import ID and appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportStateIdFunc: acctest.CrossRegionImportStateIdFunc(resourceName), + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 5: Import block with Import ID and no appended "@" + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithID, + ImportPlanChecks: 
resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + + // Step 6: Import block with Resource Identity + { + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/region_override/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "region": config.StringVariable(acctest.AlternateRegion()), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateKind: resource.ImportBlockWithResourceIdentity, + ImportPlanChecks: resource.ImportPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrARN), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrID), knownvalue.NotNull()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrRegion), knownvalue.StringExact(acctest.AlternateRegion())), + }, + }, + }, + }, + }) +} + +// Resource Identity was added after v6.13.0 +func TestAccSFNStateMachine_Identity_ExistingResource(t *testing.T) { + ctx := acctest.Context(t) + + var v sfn.DescribeStateMachineOutput + resourceName := "aws_sfn_state_machine.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + acctest.ParallelTest(ctx, t, resource.TestCase{ + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.SkipBelow(tfversion.Version1_12_0), + }, + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), + CheckDestroy: testAccCheckStateMachineDestroy(ctx), + Steps: []resource.TestStep{ + // Step 1: Create pre-Identity + { + ConfigDirectory: 
config.StaticDirectory("testdata/StateMachine/basic_v6.13.0/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + tfstatecheck.ExpectNoIdentity(resourceName), + }, + }, + + // Step 2: Current version + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/StateMachine/basic/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectIdentity(resourceName, map[string]knownvalue.Check{ + names.AttrARN: knownvalue.NotNull(), + }), + statecheck.ExpectIdentityValueMatchesState(resourceName, tfjsonpath.New(names.AttrARN)), + }, + }, + }, + }) +} diff --git a/internal/service/sfn/state_machine_test.go b/internal/service/sfn/state_machine_test.go index ab0d5f08ded0..69d3b2b4b075 100644 --- a/internal/service/sfn/state_machine_test.go +++ b/internal/service/sfn/state_machine_test.go @@ -40,7 +40,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { { Config: testAccStateMachineConfig_basic(rName, 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, 
names.AttrName, rName), @@ -70,7 +70,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { { Config: testAccStateMachineConfig_basic(rName, 10), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(ctx, resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), @@ -108,7 +108,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -129,7 +129,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 10), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -164,7 +164,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 5), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), 
resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -186,7 +186,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 10), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -221,8 +221,8 @@ func TestAccSFNStateMachine_nameGenerated(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_nameGenerated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrNameGenerated(resourceName, names.AttrName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, id.UniqueIdPrefix), ), @@ -250,8 +250,8 @@ func TestAccSFNStateMachine_namePrefix(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_namePrefix(rName, "tf-acc-test-prefix-"), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceAttrNameFromPrefix(resourceName, names.AttrName, "tf-acc-test-prefix-"), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, "tf-acc-test-prefix-"), ), @@ -279,8 +279,8 @@ func TestAccSFNStateMachine_publish(t *testing.T) { Steps: []resource.TestStep{ { Config: 
testAccStateMachineConfig_publish(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "publish", acctest.CtTrue), resource.TestCheckResourceAttrSet(resourceName, "state_machine_version_arn"), ), @@ -309,8 +309,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -322,8 +322,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { }, { Config: testAccStateMachineConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "2"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -331,8 +331,8 @@ func TestAccSFNStateMachine_tags(t *testing.T) { }, { Config: testAccStateMachineConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, "1"), 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -355,8 +355,8 @@ func TestAccSFNStateMachine_tracing(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_tracingDisable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.0.enabled", acctest.CtFalse), ), @@ -368,8 +368,8 @@ func TestAccSFNStateMachine_tracing(t *testing.T) { }, { Config: testAccStateMachineConfig_tracingEnable(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "tracing_configuration.0.enabled", acctest.CtTrue), ), @@ -392,8 +392,8 @@ func TestAccSFNStateMachine_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_basic(rName, 5), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfsfn.ResourceStateMachine(), resourceName), ), ExpectNonEmptyPlan: true, @@ -416,8 +416,8 @@ func TestAccSFNStateMachine_expressLogging(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelError)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), 
resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -430,8 +430,8 @@ func TestAccSFNStateMachine_expressLogging(t *testing.T) { }, { Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelAll)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -456,7 +456,7 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test reusePeriodSeconds1 := 900 reusePeriodSeconds2 := 450 - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -464,8 +464,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds1), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, 
names.AttrCreationDate), @@ -487,8 +487,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update periodReuseSeconds { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -505,8 +505,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update kmsKeyId { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_2(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds2), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -523,8 +523,8 @@ func TestAccSFNStateMachine_encryptionConfigurationCustomerManagedKMSKey(t *test //Update Encryption Key Type { Config: testAccStateMachineConfig_encryptionConfigurationServiceOwnedKey(rName, string(awstypes.EncryptionTypeAwsOwnedKey)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, 
names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -547,7 +547,7 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) reusePeriodSeconds := 900 - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.SFNServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -555,8 +555,8 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) Steps: []resource.TestStep{ { Config: testAccStateMachineConfig_encryptionConfigurationServiceOwnedKey(rName, string(awstypes.EncryptionTypeAwsOwnedKey)), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -575,8 +575,8 @@ func TestAccSFNStateMachine_encryptionConfigurationServiceOwnedKey(t *testing.T) //Update Encryption Type { Config: testAccStateMachineConfig_encryptionConfigurationCustomerManagedKMSKey_1(rName, string(awstypes.EncryptionTypeCustomerManagedKmsKey), reusePeriodSeconds), - Check: resource.ComposeTestCheckFunc( - testAccCheckExists(ctx, resourceName, &sm), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckStateMachineExists(ctx, resourceName, &sm), resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -612,7 +612,7 @@ func TestAccSFNStateMachine_definitionValidation(t *testing.T) { }) } -func testAccCheckExists(ctx context.Context, n string, v *sfn.DescribeStateMachineOutput) resource.TestCheckFunc { +func testAccCheckStateMachineExists(ctx context.Context, n string, v *sfn.DescribeStateMachineOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/sfn/testdata/StateMachine/basic/main_gen.tf b/internal/service/sfn/testdata/StateMachine/basic/main_gen.tf new file mode 100644 index 000000000000..ca71e97ec2e7 --- /dev/null +++ b/internal/service/sfn/testdata/StateMachine/basic/main_gen.tf @@ -0,0 +1,134 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_sfn_state_machine" "test" { + name = var.rName + role_arn = aws_iam_role.for_sfn.arn + + definition = < 0 { + // Clean up message + _, err := conn.DeleteMessage(ctx, &sqs.DeleteMessageInput{ + QueueUrl: &queueURL, + ReceiptHandle: output.Messages[0].ReceiptHandle, + }) + if err != nil { + return fmt.Errorf("error deleting message from SQS: %w", err) + } + return nil + } + } + } + } +} + +func testAccPublishActionConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q +} + +resource "aws_sqs_queue" "test" { + name = %[1]q +} + +resource "aws_sns_topic_subscription" "test" { + topic_arn = aws_sns_topic.test.arn + protocol = "sqs" + endpoint = aws_sqs_queue.test.arn +} + +resource "aws_sqs_queue_policy" "test" { + queue_url = aws_sqs_queue.test.id + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = "*" + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.test.arn + Condition = { + ArnEquals = { + "aws:SourceArn" = aws_sns_topic.test.arn + } + } + }] + }) +} + +action "aws_sns_publish" "test" { + config { + 
topic_arn = aws_sns_topic.test.arn + message = "Test message from Terraform" + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_sns_publish.test] + } + } + + depends_on = [ + aws_sns_topic_subscription.test, + aws_sqs_queue_policy.test + ] +} +`, rName) +} + +func testAccPublishActionConfig_withAttributes(rName string) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q +} + +resource "aws_sqs_queue" "test" { + name = %[1]q +} + +resource "aws_sns_topic_subscription" "test" { + topic_arn = aws_sns_topic.test.arn + protocol = "sqs" + endpoint = aws_sqs_queue.test.arn +} + +resource "aws_sqs_queue_policy" "test" { + queue_url = aws_sqs_queue.test.id + policy = jsonencode({ + Version = "2008-10-17" + Statement = [{ + Effect = "Allow" + Principal = "*" + Action = "sqs:SendMessage" + Resource = aws_sqs_queue.test.arn + Condition = { + ArnEquals = { + "aws:SourceArn" = aws_sns_topic.test.arn + } + } + }] + }) +} + +action "aws_sns_publish" "test" { + config { + topic_arn = aws_sns_topic.test.arn + subject = "Test Subject" + message = "Test message with attributes" + + message_attributes { + map_block_key = "priority" + data_type = "String" + string_value = "high" + } + + message_attributes { + map_block_key = "source" + data_type = "String" + string_value = "terraform" + } + } +} + +resource "terraform_data" "trigger" { + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_sns_publish.test] + } + } + + depends_on = [ + aws_sns_topic_subscription.test, + aws_sqs_queue_policy.test + ] +} +`, rName) +} diff --git a/internal/service/sns/service_package_gen.go b/internal/service/sns/service_package_gen.go index 5d0ac06a75a2..c11ebeb455d8 100644 --- a/internal/service/sns/service_package_gen.go +++ b/internal/service/sns/service_package_gen.go @@ -17,6 +17,17 @@ import ( type servicePackage struct{} +func (p *servicePackage) Actions(ctx 
context.Context) []*inttypes.ServicePackageAction { + return []*inttypes.ServicePackageAction{ + { + Factory: newPublishAction, + TypeName: "aws_sns_publish", + Name: "Publish", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, + } +} + func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.ServicePackageFrameworkDataSource { return []*inttypes.ServicePackageFrameworkDataSource{} } diff --git a/internal/tfresource/not_found_error.go b/internal/tfresource/not_found_error.go index 787b710bb5c2..f423cbb989ce 100644 --- a/internal/tfresource/not_found_error.go +++ b/internal/tfresource/not_found_error.go @@ -6,6 +6,7 @@ package tfresource import ( "errors" "fmt" + "iter" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -130,6 +131,43 @@ func AssertSingleValueResult[T any](a []T, fs ...foundFunc[T]) (*T, error) { } } +// AssertSingleValueResultIterErr returns either a pointer to the single value in the iterator or the error value from the iterator. +// If there are not exactly one value, returns a `NotFound` error. +func AssertSingleValueResultIterErr[T any](i iter.Seq2[T, error]) (*T, error) { + next, stop := iter.Pull2(i) + defer stop() + + v, err, ok := next() + if !ok { + return nil, NewEmptyResultError(nil) + } + + if err != nil { + return nil, err + } + + _, err, ok = next() + if !ok { + return &v, nil + } + + if err != nil { + return nil, err + } + n := 2 + for { + _, err, ok = next() + if !ok { + break + } + if err != nil { + return nil, err + } + n++ + } + return nil, NewTooManyResultsError(n, nil) +} + // AssertFirstValueResult returns a pointer to the first value in the specified slice of values. // Returns a `NotFound` error otherwise. 
func AssertFirstValueResult[T any](a []T) (*T, error) { diff --git a/internal/tfresource/not_found_error_test.go b/internal/tfresource/not_found_error_test.go index 5c170a998208..a8bd08fd004a 100644 --- a/internal/tfresource/not_found_error_test.go +++ b/internal/tfresource/not_found_error_test.go @@ -6,9 +6,11 @@ package tfresource import ( "errors" "fmt" + "iter" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + tfiter "github.com/hashicorp/terraform-provider-aws/internal/iter" ) func TestEmptyResultErrorAsNotFoundError(t *testing.T) { @@ -169,3 +171,125 @@ func TestTooManyResultsErrorIs(t *testing.T) { }) } } + +func TestAssertSingleValueResult(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + input []int + expectedValue int + expectedError error + }{ + "empty slice": { + input: []int{}, + expectedError: NewEmptyResultError(nil), + }, + "single element": { + input: []int{42}, + expectedValue: 42, + }, + "multiple elements": { + input: []int{42, 43}, + expectedError: NewTooManyResultsError(2, nil), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + result, err := AssertSingleValueResult(testCase.input) + + if testCase.expectedError != nil { + if err == nil { + t.Errorf("expected error: %v, got nil", testCase.expectedError) + } else if err.Error() != testCase.expectedError.Error() { + t.Errorf("expected error: %v, got %v", testCase.expectedError, err) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if result == nil { + if testCase.expectedError == nil { + t.Errorf("expected %d, got nil", testCase.expectedValue) + } + return + } else if *result != testCase.expectedValue { + t.Errorf("expected %d, got %d", testCase.expectedValue, *result) + } + }) + } +} + +func TestAssertSingleValueResultIterErr(t *testing.T) { + t.Parallel() + + testCases := map[string]struct { + input iter.Seq2[int, error] + expectedValue int + expectedError error + }{ + 
"empty slice": { + input: tfiter.Null2[int, error](), + expectedError: NewEmptyResultError(nil), + }, + "single element": { + input: valuesWithErrors([]int{42}), + expectedValue: 42, + }, + "multiple elements": { + input: valuesWithErrors([]int{42, 43}), + expectedError: NewTooManyResultsError(2, nil), + }, + "with error": { + input: valueError(errors.New("test error")), + expectedError: errors.New("test error"), + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + t.Parallel() + + result, err := AssertSingleValueResultIterErr(testCase.input) + + if testCase.expectedError != nil { + if err == nil { + t.Errorf("expected error: %v, got nil", testCase.expectedError) + } else if err.Error() != testCase.expectedError.Error() { + t.Errorf("expected error: %v, got %v", testCase.expectedError, err) + } + } else if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if result == nil { + if testCase.expectedError == nil { + t.Errorf("expected %d, got nil", testCase.expectedValue) + } + return + } else if *result != testCase.expectedValue { + t.Errorf("expected %d, got %d", testCase.expectedValue, *result) + } + }) + } +} + +func valuesWithErrors(values []int) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + for _, v := range values { + if !yield(v, nil) { + break + } + } + } +} + +func valueError(err error) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + if !yield(0, err) { + return + } + } +} diff --git a/internal/types/service_package.go b/internal/types/service_package.go index d783b86823e7..d966e651e8f3 100644 --- a/internal/types/service_package.go +++ b/internal/types/service_package.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/action" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/ephemeral" + "github.com/hashicorp/terraform-plugin-framework/list" 
"github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -83,6 +84,15 @@ type ServicePackageFrameworkResource struct { Import FrameworkImport } +type ServicePackageFrameworkListResource struct { + Factory func() list.ListResourceWithConfigure + TypeName string + Name string + Tags unique.Handle[ServicePackageResourceTags] + Region unique.Handle[ServicePackageResourceRegion] + Identity Identity +} + // ServicePackageSDKDataSource represents a Terraform Plugin SDK data source // implemented by a service package. type ServicePackageSDKDataSource struct { @@ -105,6 +115,20 @@ type ServicePackageSDKResource struct { Import SDKv2Import } +type ListResourceForSDK interface { + list.ListResourceWithRawV5Schemas + list.ListResourceWithConfigure +} + +type ServicePackageSDKListResource struct { + Factory func() ListResourceForSDK + TypeName string + Name string + Tags unique.Handle[ServicePackageResourceTags] + Region unique.Handle[ServicePackageResourceRegion] + Identity Identity +} + type Identity struct { IsGlobalResource bool // All IsSingleton bool // Singleton @@ -413,3 +437,7 @@ type SDKv2Import struct { CustomImport bool ImportID SDKv2ImportID // Multi-Parameter } + +type SDKv2Tagger interface { + SetTagsSpec(tags unique.Handle[ServicePackageResourceTags]) +} diff --git a/names/names.go b/names/names.go index 83115a3d186b..9316b49a8c1c 100644 --- a/names/names.go +++ b/names/names.go @@ -296,5 +296,9 @@ func HumanFriendly(service string) (string, error) { } const ( - TopLevelRegionAttributeDescription = `Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). 
Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).` + ResourceTopLevelRegionAttributeDescription = `Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). ` + topLevelRegionDefaultDescription + ListResourceTopLevelRegionAttributeDescription = `Region to [query](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) for resources of this type. ` + topLevelRegionDefaultDescription + ActionTopLevelRegionAttributeDescription = `Region where this action will be [executed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). ` + topLevelRegionDefaultDescription + + topLevelRegionDefaultDescription = `Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).` ) diff --git a/skaff/go.mod b/skaff/go.mod index bc0db8dd8f26..614915d036dc 100644 --- a/skaff/go.mod +++ b/skaff/go.mod @@ -14,11 +14,11 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.66 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/spf13/pflag v1.0.9 // indirect - github.com/zclconf/go-cty v1.16.4 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect golang.org/x/mod v0.28.0 // indirect golang.org/x/sync v0.17.0 // indirect golang.org/x/text v0.29.0 // indirect diff --git a/skaff/go.sum b/skaff/go.sum index 22aa49c7b46b..597c36d88848 100644 --- a/skaff/go.sum +++ b/skaff/go.sum @@ -15,8 +15,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.66 h1:HA6blfR0h6kGnw4oJ92tZzghubreIkWbQJ4NVNqS688= github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.66/go.mod h1:7kTJVbY5+igob9Q5N6KO81EGEKDNI9FpjujB31uI/n0= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -26,8 +26,8 @@ github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/zclconf/go-cty v1.16.4 h1:QGXaag7/7dCzb+odlGrgr+YmYZFaOCMW6DEpS+UD1eE= -github.com/zclconf/go-cty v1.16.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod index 8d0ad571b930..9da7ce5369f9 100644 --- 
a/tools/tfsdk2fw/go.mod +++ b/tools/tfsdk2fw/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw go 1.24.6 require ( - github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0 github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 ) @@ -318,9 +318,9 @@ require ( github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hc-install v0.9.2 // indirect - github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.23.0 // indirect + github.com/hashicorp/terraform-exec v0.24.0 // indirect github.com/hashicorp/terraform-json v0.27.2 // indirect github.com/hashicorp/terraform-plugin-framework v1.16.0 // indirect github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 // indirect @@ -329,7 +329,7 @@ require ( github.com/hashicorp/terraform-plugin-framework-validators v0.18.0 // indirect github.com/hashicorp/terraform-plugin-go v0.29.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-testing v1.13.3 // indirect + github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1 // indirect github.com/hashicorp/terraform-registry-address v0.4.0 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.2 // indirect @@ -356,7 +356,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/zclconf/go-cty v1.16.4 // indirect + github.com/zclconf/go-cty v1.17.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.63.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect diff --git a/tools/tfsdk2fw/go.sum b/tools/tfsdk2fw/go.sum index 05897c5ae1bb..d2d462a11cd7 100644 --- a/tools/tfsdk2fw/go.sum +++ b/tools/tfsdk2fw/go.sum @@ -659,12 +659,12 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hc-install v0.9.2 h1:v80EtNX4fCVHqzL9Lg/2xkp62bbvQMnvPQ0G+OmtO24= github.com/hashicorp/hc-install v0.9.2/go.mod h1:XUqBQNnuT4RsxoxiM9ZaUk0NX8hi2h+Lb6/c0OZnC/I= -github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= -github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= +github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.23.0 h1:MUiBM1s0CNlRFsCLJuM5wXZrzA3MnPYEsiXmzATMW/I= -github.com/hashicorp/terraform-exec v0.23.0/go.mod h1:mA+qnx1R8eePycfwKkCRk3Wy65mwInvlpAeOwmA7vlY= +github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE= +github.com/hashicorp/terraform-exec v0.24.0/go.mod h1:lluc/rDYfAhYdslLJQg3J0oDqo88oGQAdHR+wDqFvo4= github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= github.com/hashicorp/terraform-plugin-framework v1.16.0 h1:tP0f+yJg0Z672e7levixDe5EpWwrTrNryPM9kDMYIpE= @@ -681,10 +681,10 @@ github.com/hashicorp/terraform-plugin-go v0.29.0 h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+ 
github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= github.com/hashicorp/terraform-plugin-mux v0.21.0 h1:QsEYnzSD2c3zT8zUrUGqaFGhV/Z8zRUlU7FY3ZPJFfw= github.com/hashicorp/terraform-plugin-mux v0.21.0/go.mod h1:Qpt8+6AD7NmL0DS7ASkN0EXpDQ2J/FnnIgeUr1tzr5A= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0 h1:NFPMacTrY/IdcIcnUB+7hsore1ZaRWU9cnB6jFoBnIM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.37.0/go.mod h1:QYmYnLfsosrxjCnGY1p9c7Zj6n9thnEE+7RObeYs3fA= -github.com/hashicorp/terraform-plugin-testing v1.13.3 h1:QLi/khB8Z0a5L54AfPrHukFpnwsGL8cwwswj4RZduCo= -github.com/hashicorp/terraform-plugin-testing v1.13.3/go.mod h1:WHQ9FDdiLoneey2/QHpGM/6SAYf4A7AZazVg7230pLE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0 h1:PQP7Crrc7t/ozj+P9x0/lsTzGNy3lVppH8zAJylofaE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.0/go.mod h1:GQhpKVvvuwzD79e8/NZ+xzj+ZpWovdPAe8nfV/skwNU= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1 h1:caWmY2Fv/KgDAXU7IVjcBDfIdmr/n6VRYhCLxNmlaXs= +github.com/hashicorp/terraform-plugin-testing v1.14.0-beta.1/go.mod h1:jVm3pD9uQAT0X2RSEdcqjju2bCGv5f73DGZFU4v7EAU= github.com/hashicorp/terraform-registry-address v0.4.0 h1:S1yCGomj30Sao4l5BMPjTGZmCNzuv7/GDTDX99E9gTk= github.com/hashicorp/terraform-registry-address v0.4.0/go.mod h1:LRS1Ay0+mAiRkUyltGT+UHWkIqTFvigGn/LbMshfflE= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -750,8 +750,8 @@ github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXq github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -785,8 +785,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.16.4 h1:QGXaag7/7dCzb+odlGrgr+YmYZFaOCMW6DEpS+UD1eE= -github.com/zclconf/go-cty v1.16.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= +github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= diff --git a/version/VERSION b/version/VERSION index e2983690bef3..eecf8f79bfcc 100644 --- a/version/VERSION +++ b/version/VERSION @@ -1 +1 @@ -6.13.1 \ No newline at end of file +6.14.1 \ No newline at end of file diff --git a/website/docs/actions/cloudfront_create_invalidation.html.markdown b/website/docs/actions/cloudfront_create_invalidation.html.markdown new file mode 100644 index 
000000000000..12d86e0a1024 --- /dev/null +++ b/website/docs/actions/cloudfront_create_invalidation.html.markdown @@ -0,0 +1,135 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_create_invalidation" +description: |- + Invalidates CloudFront distribution cache for specified paths. +--- + +# Action: aws_cloudfront_create_invalidation + +~> **Note:** `aws_cloudfront_create_invalidation` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invalidates CloudFront distribution cache for specified paths. This action creates an invalidation request and waits for it to complete. + +For information about CloudFront cache invalidation, see the [Amazon CloudFront Developer Guide](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html). For specific information about creating invalidation requests, see the [CreateInvalidation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CreateInvalidation.html) page in the Amazon CloudFront API Reference. + +~> **Note:** CloudFront invalidation requests can take several minutes to complete. This action will wait for the invalidation to finish before continuing. You can only have a limited number of invalidation requests in progress at any given time. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_cloudfront_distribution" "example" { + # ... 
distribution configuration +} + +action "aws_cloudfront_create_invalidation" "example" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = ["/*"] + } +} + +resource "terraform_data" "example" { + input = "trigger-invalidation" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_cloudfront_create_invalidation.example] + } + } +} +``` + +### Invalidate Specific Paths + +```terraform +action "aws_cloudfront_create_invalidation" "assets" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = [ + "/images/*", + "/css/*", + "/js/app.js", + "/index.html" + ] + timeout = 1200 # 20 minutes + } +} +``` + +### With Custom Caller Reference + +```terraform +action "aws_cloudfront_create_invalidation" "deployment" { + config { + distribution_id = aws_cloudfront_distribution.example.id + paths = ["/*"] + caller_reference = "deployment-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + timeout = 900 + } +} +``` + +### CI/CD Pipeline Integration + +Use this action in your deployment pipeline to invalidate cache after updating static assets: + +```terraform +# Trigger invalidation after S3 sync +resource "terraform_data" "deploy_complete" { + input = local.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_cloudfront_create_invalidation.post_deploy] + } + } + + depends_on = [aws_s3_object.assets] +} + +action "aws_cloudfront_create_invalidation" "post_deploy" { + config { + distribution_id = aws_cloudfront_distribution.main.id + paths = [ + "/index.html", + "/manifest.json", + "/static/js/*", + "/static/css/*" + ] + } +} +``` + +### Environment-Specific Invalidation + +```terraform +locals { + cache_paths = var.environment == "production" ? 
[ + "/api/*", + "/assets/*" + ] : ["/*"] +} + +action "aws_cloudfront_create_invalidation" "env_specific" { + config { + distribution_id = aws_cloudfront_distribution.app.id + paths = local.cache_paths + timeout = var.environment == "production" ? 1800 : 900 + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `distribution_id` - (Required) ID of the CloudFront distribution to invalidate cache for. Must be a valid CloudFront distribution ID (e.g., E1GHKQ2EXAMPLE). +* `paths` - (Required) List of file paths or patterns to invalidate. Use `/*` to invalidate all files. Supports specific files (`/index.html`), directory wildcards (`/images/*`), or all files (`/*`). Maximum of 3000 paths per invalidation request. Note: The first 1,000 invalidation paths per month are free, additional paths are charged per path. +* `caller_reference` - (Optional) Unique identifier for the invalidation request. If not provided, one will be generated automatically. Maximum length of 128 characters. +* `timeout` - (Optional) Timeout in seconds to wait for the invalidation to complete. Defaults to 900 seconds (15 minutes). Must be between 60 and 3600 seconds. Invalidation requests typically take 5-15 minutes to process. diff --git a/website/docs/actions/ec2_stop_instance.html.markdown b/website/docs/actions/ec2_stop_instance.html.markdown new file mode 100644 index 000000000000..7d627c98ea3c --- /dev/null +++ b/website/docs/actions/ec2_stop_instance.html.markdown @@ -0,0 +1,91 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_stop_instance" +description: |- + Stops an EC2 instance. +--- + +# Action: aws_ec2_stop_instance + +~> **Note:** `aws_ec2_stop_instance` is in alpha. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Stops an EC2 instance. 
This action will gracefully stop the instance and wait for it to reach the stopped state. + +For information about Amazon EC2, see the [Amazon EC2 User Guide](https://docs.aws.amazon.com/ec2/latest/userguide/). For specific information about stopping instances, see the [StopInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StopInstances.html) page in the Amazon EC2 API Reference. + +~> **Note:** This action directly stops EC2 instances which will interrupt running workloads. Ensure proper coordination with your applications before using this action. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_instance" "example" { + ami = data.aws_ami.amazon_linux.id + instance_type = "t3.micro" + + tags = { + Name = "example-instance" + } +} + +action "aws_ec2_stop_instance" "example" { + config { + instance_id = aws_instance.example.id + } +} +``` + +### Force Stop + +```terraform +action "aws_ec2_stop_instance" "force_stop" { + config { + instance_id = aws_instance.example.id + force = true + timeout = 300 + } +} +``` + +### Maintenance Window + +```terraform +resource "aws_instance" "web_server" { + ami = data.aws_ami.amazon_linux.id + instance_type = "t3.micro" + + tags = { + Name = "web-server" + } +} + +action "aws_ec2_stop_instance" "maintenance" { + config { + instance_id = aws_instance.web_server.id + timeout = 900 + } +} + +resource "terraform_data" "maintenance_trigger" { + input = var.maintenance_window + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ec2_stop_instance.maintenance] + } + } + + depends_on = [aws_instance.web_server] +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `instance_id` - (Required) ID of the EC2 instance to stop. Must be a valid EC2 instance ID (e.g., i-1234567890abcdef0). +* `force` - (Optional) Forces the instance to stop. 
The instance does not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: `false`. +* `timeout` - (Optional) Timeout in seconds to wait for the instance to stop. Must be between 30 and 3600 seconds. Default: `600`. diff --git a/website/docs/actions/lambda_invoke.html.markdown b/website/docs/actions/lambda_invoke.html.markdown new file mode 100644 index 000000000000..4a048c106e8f --- /dev/null +++ b/website/docs/actions/lambda_invoke.html.markdown @@ -0,0 +1,221 @@ +--- +subcategory: "Lambda" +layout: "aws" +page_title: "AWS: aws_lambda_invoke" +description: |- + Invokes an AWS Lambda function with the specified payload. +--- + +# Action: aws_lambda_invoke + +~> **Note:** `aws_lambda_invoke` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Invokes an AWS Lambda function with the specified payload. This action allows for imperative invocation of Lambda functions with full control over invocation parameters. + +For information about AWS Lambda functions, see the [AWS Lambda Developer Guide](https://docs.aws.amazon.com/lambda/latest/dg/). For specific information about invoking Lambda functions, see the [Invoke](https://docs.aws.amazon.com/lambda/latest/api/API_Invoke.html) page in the AWS Lambda API Reference. + +~> **Note:** Synchronous invocations will wait for the function to complete execution, while asynchronous invocations return immediately after the request is _accepted_. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_lambda_function" "example" { + # ... 
function configuration +} + +action "aws_lambda_invoke" "example" { + config { + function_name = aws_lambda_function.example.function_name + payload = jsonencode({ + key1 = "value1" + key2 = "value2" + }) + } +} + +resource "terraform_data" "example" { + input = "trigger-lambda" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.example] + } + } +} +``` + +### Invoke with Function Version + +```terraform +action "aws_lambda_invoke" "versioned" { + config { + function_name = aws_lambda_function.example.function_name + qualifier = aws_lambda_function.example.version + payload = jsonencode({ + operation = "process" + data = var.processing_data + }) + } +} +``` + +### Asynchronous Invocation + +```terraform +action "aws_lambda_invoke" "async" { + config { + function_name = aws_lambda_function.worker.function_name + invocation_type = "Event" + payload = jsonencode({ + task_id = "background-job-${random_uuid.job_id.result}" + data = local.background_task_data + }) + } +} +``` + +### Dry Run Validation + +```terraform +action "aws_lambda_invoke" "validate" { + config { + function_name = aws_lambda_function.validator.function_name + invocation_type = "DryRun" + payload = jsonencode({ + config = var.validation_config + }) + } +} +``` + +### With Log Capture + +```terraform +action "aws_lambda_invoke" "debug" { + config { + function_name = aws_lambda_function.debug.function_name + log_type = "Tail" + payload = jsonencode({ + debug_level = "verbose" + component = "api-gateway" + }) + } +} +``` + +### Mobile Application Context + +```terraform +action "aws_lambda_invoke" "mobile" { + config { + function_name = aws_lambda_function.mobile_backend.function_name + client_context = base64encode(jsonencode({ + client = { + client_id = "mobile-app" + app_version = "1.2.3" + } + env = { + locale = "en_US" + } + })) + payload = jsonencode({ + user_id = var.user_id + action = "sync_data" + }) + } +} +``` + +### CI/CD 
Pipeline Integration + +Use this action in your deployment pipeline to trigger post-deployment functions: + +```terraform +# Trigger warmup after deployment +resource "terraform_data" "deploy_complete" { + input = local.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_lambda_invoke.warmup] + } + } + + depends_on = [aws_lambda_function.api] +} + +action "aws_lambda_invoke" "warmup" { + config { + function_name = aws_lambda_function.api.function_name + payload = jsonencode({ + action = "warmup" + source = "terraform-deployment" + }) + } +} +``` + +### Environment-Specific Processing + +```terraform +locals { + processing_config = var.environment == "production" ? { + batch_size = 100 + timeout = 900 + } : { + batch_size = 10 + timeout = 60 + } +} + +action "aws_lambda_invoke" "process_data" { + config { + function_name = aws_lambda_function.processor.function_name + payload = jsonencode(merge(local.processing_config, { + data_source = var.data_source + environment = var.environment + })) + } +} +``` + +### Complex Payload with Dynamic Content + +```terraform +action "aws_lambda_invoke" "complex" { + config { + function_name = aws_lambda_function.orchestrator.function_name + payload = jsonencode({ + workflow = { + id = "workflow-${timestamp()}" + steps = var.workflow_steps + } + resources = { + s3_bucket = aws_s3_bucket.data.bucket + dynamodb = aws_dynamodb_table.state.name + sns_topic = aws_sns_topic.notifications.arn + } + metadata = { + created_by = "terraform" + environment = var.environment + version = var.app_version + } + }) + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `client_context` - (Optional) Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. This is only used for mobile applications and should contain information about the client application and device. 
+* `function_name` - (Required) Name, ARN, or partial ARN of the Lambda function to invoke. You can specify a function name (e.g., `my-function`), a qualified function name (e.g., `my-function:PROD`), or a partial ARN (e.g., `123456789012:function:my-function`). +* `invocation_type` - (Optional) Invocation type. Valid values are `RequestResponse` (default) for synchronous invocation that waits for the function to complete and returns the response, `Event` for asynchronous invocation that returns immediately after the request is accepted, and `DryRun` to validate parameters and verify permissions without actually executing the function. +* `log_type` - (Optional) Set to `Tail` to include the execution log in the response. Only applies to synchronous invocations (`RequestResponse` invocation type). Defaults to `None`. When set to `Tail`, the last 4 KB of the execution log is included in the response. +* `payload` - (Required) JSON payload to send to the Lambda function. This should be a valid JSON string that represents the event data for your function. The payload size limit is 6 MB for synchronous invocations and 256 KB for asynchronous invocations. +* `qualifier` - (Optional) Version or alias of the Lambda function to invoke. If not specified, the `$LATEST` version will be invoked. Can be a version number (e.g., `1`) or an alias (e.g., `PROD`). diff --git a/website/docs/actions/ses_send_email.html.markdown b/website/docs/actions/ses_send_email.html.markdown new file mode 100644 index 000000000000..4f1bd36adae0 --- /dev/null +++ b/website/docs/actions/ses_send_email.html.markdown @@ -0,0 +1,176 @@ +--- +subcategory: "SES (Simple Email)" +layout: "aws" +page_title: "AWS: aws_ses_send_email" +description: |- + Sends an email using Amazon SES. +--- + +# Action: aws_ses_send_email + +~> **Note:** `aws_ses_send_email` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. 
It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Sends an email using Amazon SES. This action allows for imperative email sending with full control over recipients, content, and formatting. + +For information about Amazon SES, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/). For specific information about sending emails, see the [SendEmail](https://docs.aws.amazon.com/ses/latest/APIReference/API_SendEmail.html) page in the Amazon SES API Reference. + +~> **Note:** All email addresses used must be verified in Amazon SES or belong to a verified domain. Due to the difficulty in testing, your help is important in discovering and reporting issues. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_ses_email_identity" "example" { + email = "sender@example.com" +} + +action "aws_ses_send_email" "example" { + config { + source = aws_ses_email_identity.example.email + subject = "Test Email" + text_body = "This is a test email sent from Terraform." + to_addresses = ["recipient@example.com"] + } +} + +resource "terraform_data" "example" { + input = "send-notification" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_ses_send_email.example] + } + } +} +``` + +### HTML Email with Multiple Recipients + +```terraform +action "aws_ses_send_email" "newsletter" { + config { + source = aws_ses_email_identity.marketing.email + subject = "Monthly Newsletter - ${formatdate("MMMM YYYY", timestamp())}" + html_body = "
<h1>Welcome!</h1><p>This is our monthly newsletter.</p>
" + to_addresses = var.subscriber_emails + cc_addresses = ["manager@example.com"] + reply_to_addresses = ["support@example.com"] + return_path = "bounces@example.com" + } +} +``` + +### Deployment Notification + +```terraform +action "aws_ses_send_email" "deploy_notification" { + config { + source = "deployments@example.com" + subject = "Deployment Complete: ${var.environment}" + text_body = "Application ${var.app_name} has been successfully deployed to ${var.environment}." + to_addresses = var.team_emails + } +} + +resource "terraform_data" "deployment" { + input = var.deployment_id + + lifecycle { + action_trigger { + events = [after_create] + actions = [action.aws_ses_send_email.deploy_notification] + } + } + + depends_on = [aws_instance.app] +} +``` + +### Alert Email with Dynamic Content + +```terraform +locals { + alert_body = templatefile("${path.module}/templates/alert.txt", { + service = var.service_name + environment = var.environment + timestamp = timestamp() + details = var.alert_details + }) +} + +action "aws_ses_send_email" "alert" { + config { + source = "alerts@example.com" + subject = "ALERT: ${var.service_name} Issue Detected" + text_body = local.alert_body + to_addresses = var.oncall_emails + cc_addresses = var.manager_emails + } +} +``` + +### Multi-format Email + +```terraform +action "aws_ses_send_email" "welcome" { + config { + source = aws_ses_email_identity.noreply.email + subject = "Welcome to ${var.company_name}!" + text_body = "Welcome! Thank you for joining us. Visit our website for more information." + html_body = templatefile("${path.module}/templates/welcome.html", { + user_name = var.user_name + company_name = var.company_name + website_url = var.website_url + }) + to_addresses = [var.user_email] + } +} +``` + +### Conditional Email Sending + +```terraform +action "aws_ses_send_email" "conditional" { + config { + source = "notifications@example.com" + subject = var.environment == "production" ? 
"Production Alert" : "Test Alert" + text_body = "This is a ${var.environment} environment notification." + to_addresses = var.environment == "production" ? var.prod_emails : var.dev_emails + } +} +``` + +### Batch Processing Notification + +```terraform +action "aws_ses_send_email" "batch_complete" { + config { + source = "batch-jobs@example.com" + subject = "Batch Processing Complete - ${var.job_name}" + html_body = <<-HTML +
+      <h2>Batch Job Results</h2>
+      <p><strong>Job:</strong> ${var.job_name}</p>
+      <p><strong>Records Processed:</strong> ${var.records_processed}</p>
+      <p><strong>Duration:</strong> ${var.processing_duration}</p>
+      <p><strong>Status:</strong> ${var.job_status}</p>
+ HTML + to_addresses = var.admin_emails + } +} +``` + +## Argument Reference + +This action supports the following arguments: + +* `bcc_addresses` - (Optional) List of email addresses for the BCC: field of the message. Recipients in this list will receive the email but their addresses will not be visible to other recipients. +* `cc_addresses` - (Optional) List of email addresses for the CC: field of the message. Recipients in this list will receive the email and their addresses will be visible to all recipients. +* `html_body` - (Optional) Message body in HTML format. Either `text_body` or `html_body` (or both) must be specified. HTML content allows for rich formatting including links, images, and styling. +* `reply_to_addresses` - (Optional) List of reply-to email addresses for the message. If the recipient replies to the message, each reply-to address will receive the reply. If not specified, replies will go to the source address. +* `return_path` - (Optional) Email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. This is useful for handling delivery failures and spam complaints. +* `source` - (Required) Email address that is sending the email. This address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. +* `subject` - (Required) Subject of the message: A short summary of the content, which will appear in the recipient's inbox. +* `text_body` - (Optional) Message body in text format. Either `text_body` or `html_body` (or both) must be specified. Text format ensures compatibility with all email clients. +* `to_addresses` - (Optional) List of email addresses for the To: field of the message. These are the primary recipients of the email. 
diff --git a/website/docs/actions/sns_publish.html.markdown b/website/docs/actions/sns_publish.html.markdown new file mode 100644 index 000000000000..dfc2206e68b2 --- /dev/null +++ b/website/docs/actions/sns_publish.html.markdown @@ -0,0 +1,150 @@ +--- +subcategory: "SNS (Simple Notification)" +layout: "aws" +page_title: "AWS: aws_sns_publish" +description: |- + Publishes a message to an Amazon SNS topic. +--- + +# Action: aws_sns_publish + +~> **Note:** `aws_sns_publish` is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Publishes a message to an Amazon SNS topic. This action allows for imperative message publishing with full control over message attributes and structure. + +For information about Amazon SNS, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/). For specific information about publishing messages, see the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) page in the Amazon SNS API Reference. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_sns_topic" "example" { + name = "example-topic" +} + +action "aws_sns_publish" "example" { + config { + topic_arn = aws_sns_topic.example.arn + message = "Hello from Terraform!" 
+ } +} + +resource "terraform_data" "example" { + input = "trigger-message" + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sns_publish.example] + } + } +} +``` + +### Message with Subject + +```terraform +action "aws_sns_publish" "notification" { + config { + topic_arn = aws_sns_topic.alerts.arn + subject = "System Alert" + message = "Critical system event detected at ${timestamp()}" + } +} +``` + +### JSON Message Structure + +```terraform +action "aws_sns_publish" "structured" { + config { + topic_arn = aws_sns_topic.mobile.arn + message_structure = "json" + message = jsonencode({ + default = "Default message" + email = "Email version of the message" + sms = "SMS version" + GCM = jsonencode({ + data = { + message = "Push notification message" + } + }) + }) + } +} +``` + +### Message with Attributes + +```terraform +action "aws_sns_publish" "with_attributes" { + config { + topic_arn = aws_sns_topic.processing.arn + message = "Process this data" + + message_attributes { + map_block_key = "priority" + data_type = "String" + string_value = "high" + } + + message_attributes { + map_block_key = "source" + data_type = "String" + string_value = "terraform" + } + } +} +``` + +### Deployment Notification + +```terraform +action "aws_sns_publish" "deploy_complete" { + config { + topic_arn = aws_sns_topic.deployments.arn + subject = "Deployment Complete" + message = jsonencode({ + environment = var.environment + version = var.app_version + timestamp = timestamp() + resources = { + instances = length(aws_instance.app) + databases = length(aws_db_instance.main) + } + }) + } +} + +resource "terraform_data" "deploy_trigger" { + input = var.deployment_id + + lifecycle { + action_trigger { + events = [before_create, before_update] + actions = [action.aws_sns_publish.deploy_complete] + } + } + + depends_on = [aws_instance.app, aws_db_instance.main] +} +``` + +## Argument Reference + +This action supports the following arguments: 
+ +* `message` - (Required) Message to publish. For JSON message structure, this should be a JSON object with protocol-specific messages. Maximum size is 256 KB. +* `message_attributes` - (Optional) Message attributes to include with the message. Each attribute consists of a name, data type, and value. Up to 10 attributes are allowed. [See below.](#message-attributes) +* `message_structure` - (Optional) Set to `json` if you want to send different messages for each protocol. If not specified, the message will be sent as-is to all protocols. +* `subject` - (Optional) Optional subject for the message. Only used for email and email-json protocols. Maximum length is 100 characters. +* `topic_arn` - (Required) ARN of the SNS topic to publish the message to. + +### Message Attributes + +The `message_attributes` block supports: + +* `data_type` - (Required) Data type of the message attribute. Valid values are `String`, `Number`, and `Binary`. +* `map_block_key` - (Required) Name of the message attribute (used as map key). Must be unique within the message. +* `string_value` - (Required) Value of the message attribute. diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown index 0e7b0b08b58b..f00bc0569d0c 100644 --- a/website/docs/d/instance.html.markdown +++ b/website/docs/d/instance.html.markdown @@ -36,9 +36,10 @@ This data source supports the following arguments: * `instance_id` - (Optional) Specify the exact Instance ID with which to populate the data source. * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on the desired Instance. -* `filter` - (Optional) One or more name/value pairs to use as filters. There are -several valid keys, for a full reference, check out -[describe-instances in the AWS CLI reference][1]. +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. 
+ For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `get_user_data` - (Optional) Retrieve Base64 encoded User Data contents into the `user_data_base64` attribute. A SHA-1 hash of the User Data contents will always be present in the `user_data` attribute. Defaults to `false`. @@ -48,6 +49,14 @@ several valid keys, for a full reference, check out Terraform will fail. Ensure that your search is specific enough to return a single Instance ID only. +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. + ## Attribute Reference `id` is set to the ID of the found Instance. In addition, the following attributes diff --git a/website/docs/d/instances.html.markdown b/website/docs/d/instances.html.markdown index 276a36802bd6..ebfa78f0bf7b 100644 --- a/website/docs/d/instances.html.markdown +++ b/website/docs/d/instances.html.markdown @@ -51,9 +51,18 @@ This data source supports the following arguments: * `instance_tags` - (Optional) Map of tags, each pair of which must exactly match a pair on desired instances. * `instance_state_names` - (Optional) List of instance states that should be applicable to the desired instances. The permitted values are: `pending, running, shutting-down, stopped, stopping, terminated`. The default value is `running`. 
-* `filter` - (Optional) One or more name/value pairs to use as filters. There are
-several valid keys, for a full reference, check out
-[describe-instances in the AWS CLI reference][1].
+* `filter` - (Optional) One or more filters to apply to the search.
+  If multiple `filter` blocks are provided, they all must be true.
+  For a full reference of filter names, see [describe-instances in the AWS CLI reference][1].
+  See [`filter` Block](#filter-block) below.
+
+### `filter` Block
+
+The `filter` block supports the following arguments:
+
+* `name` - (Required) Name of the filter.
+  For a full reference of filter names, see [describe-instances in the AWS CLI reference][1].
+* `values` - (Required) One or more values to match.
 
 ## Attribute Reference
 
diff --git a/website/docs/d/odb_cloud_vm_cluster.html.markdown b/website/docs/d/odb_cloud_vm_cluster.html.markdown
new file mode 100644
index 000000000000..53d1f8f0be16
--- /dev/null
+++ b/website/docs/d/odb_cloud_vm_cluster.html.markdown
@@ -0,0 +1,77 @@
+---
+subcategory: "Oracle Database@AWS"
+layout: "aws"
+page_title: "AWS: aws_odb_cloud_vm_cluster"
+description: |-
+  Terraform data source for managing cloud vm cluster resource in AWS for Oracle Database@AWS.
+---
+
+# Data Source: aws_odb_cloud_vm_cluster
+
+Terraform data source for cloud vm cluster resource in AWS for Oracle Database@AWS.
+
+You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html).
+
+## Example Usage
+
+### Basic Usage
+
+```terraform
+data "aws_odb_cloud_vm_cluster" "example" {
+  id = "example-id"
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `id` - (Required) The unique identifier of the cloud vm cluster.
+ +The following arguments are optional: + +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster. +* `cloud_exadata_infrastructure_id` - The ID of the Cloud Exadata Infrastructure. +* `cluster_name` - The name of the Grid Infrastructure (GI) cluster. +* `cpu_core_count` - The number of CPU cores enabled on the VM cluster. +* `data_storage_size_in_tbs` - The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster. +* `db_node_storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster. +* `db_servers` - The list of database servers for the VM cluster. +* `disk_redundancy` - The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy. +* `display_name` - The display name of the VM cluster. +* `domain` - The domain name of the VM cluster. +* `gi_version` - The software version of the Oracle Grid Infrastructure (GI) for the VM cluster. +* `hostname_prefix_computed` - The computed hostname prefix for the VM cluster. +* `is_local_backup_enabled` - Indicates whether database backups to local Exadata storage is enabled for the VM cluster. +* `is_sparse_disk_group_enabled` - Indicates whether the VM cluster is configured with a sparse disk group. +* `last_update_history_entry_id` - The Oracle Cloud ID (OCID) of the last maintenance update history entry. +* `license_model` - The Oracle license model applied to the VM cluster. +* `listener_port` - The port number configured for the listener on the VM cluster. 
+* `memory_size_in_gbs` - The amount of memory, in gigabytes (GB), that's allocated for the VM cluster. +* `node_count` - The number of nodes in the VM cluster. +* `ocid` - The OCID of the VM cluster. +* `oci_resource_anchor_name` - The name of the OCI Resource Anchor. +* `oci_url` - The HTTPS link to the VM cluster in OCI. +* `odb_network_id` - The ID of the ODB network. +* `percent_progress` - The amount of progress made on the current operation on the VM cluster, expressed as a percentage. +* `scan_dns_name` - The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster. +* `scan_ip_ids` - The OCID of the SCAN IP addresses that are associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure that's running the VM cluster. +* `ssh_public_keys` - The public key portion of one or more key pairs used for SSH access to the VM cluster. +* `status` - The status of the VM cluster. +* `status_reason` - Additional information about the status of the VM cluster. +* `storage_size_in_gbs` - The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster. +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `timezone` - The time zone of the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster. +* `created_at` - The time when the VM cluster was created. +* `compute_model` - The OCI model compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. 
ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled. +* `data_collection_options` - The set of diagnostic collection options enabled for the VM cluster. +* `iorm_config_cache` - The ExadataIormConfig cache details for the VM cluster. diff --git a/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown b/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown index 64a13f45f055..f7c321734c32 100644 --- a/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown +++ b/website/docs/ephemeral-resources/cognito_identity_openid_token_for_developer_identity.markdown @@ -6,7 +6,6 @@ description: |- Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. --- - # Ephemeral: aws_cognito_identity_openid_token_for_developer_identity Terraform ephemeral resource for managing an AWS Cognito Identity Open ID Token for Developer Identity. diff --git a/website/docs/list-resources/batch_job_queue.html.markdown b/website/docs/list-resources/batch_job_queue.html.markdown new file mode 100644 index 000000000000..30c4f93573b6 --- /dev/null +++ b/website/docs/list-resources/batch_job_queue.html.markdown @@ -0,0 +1,28 @@ +--- +subcategory: "Batch" +layout: "aws" +page_title: "AWS: aws_batch_job_queue" +description: |- + Lists Batch Job Queue resources. +--- + +# List Resource: aws_batch_job_queue + +~> **Note:** The `aws_batch_job_queue` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists Batch Job Queue resources. 
+ +## Example Usage + +```terraform +list "aws_batch_job_queue" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). diff --git a/website/docs/list-resources/cloudwatch_log_group.html.markdown b/website/docs/list-resources/cloudwatch_log_group.html.markdown new file mode 100644 index 000000000000..a51300c5edd9 --- /dev/null +++ b/website/docs/list-resources/cloudwatch_log_group.html.markdown @@ -0,0 +1,28 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_group" +description: |- + Lists CloudWatch Logs Log Group resources. +--- + +# List Resource: aws_cloudwatch_log_group + +~> **Note:** The `aws_cloudwatch_log_group` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists CloudWatch Logs Log Group resources. + +## Example Usage + +```terraform +list "aws_cloudwatch_log_group" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). 
diff --git a/website/docs/list-resources/iam_role.html.markdown b/website/docs/list-resources/iam_role.html.markdown new file mode 100644 index 000000000000..c1c6205556ad --- /dev/null +++ b/website/docs/list-resources/iam_role.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "IAM (Identity & Access Management)" +layout: "aws" +page_title: "AWS: aws_iam_role" +description: |- + Lists IAM Role resources. +--- + +# List Resource: aws_iam_role + +~> **Note:** The `aws_iam_role` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists IAM Role resources. + +Excludes Service-Linked Roles (see "AWS service-linked role" in [IAM Roles Terms and Concepts documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts)). + +## Example Usage + +```terraform +list "aws_iam_role" "example" { + provider = aws +} +``` + +## Argument Reference + +This list resource does not support any arguments. diff --git a/website/docs/list-resources/instance.html.markdown b/website/docs/list-resources/instance.html.markdown new file mode 100644 index 000000000000..689e97c2c91f --- /dev/null +++ b/website/docs/list-resources/instance.html.markdown @@ -0,0 +1,65 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_instance" +description: |- + Lists EC2 Instance resources. +--- + +# List Resource: aws_instance + +~> **Note:** The `aws_instance` List Resource is in beta. Its interface and behavior may change as the feature evolves, and breaking changes are possible. It is offered as a technical preview without compatibility guarantees until Terraform 1.14 is generally available. + +Lists EC2 Instance resources. 
+ +By default, EC2 Instances managed by an Auto Scaling Group and EC2 Instances in either the `terminated` or `shutting-down` state are excluded. + +## Example Usage + +### Basic Usage + +```terraform +list "aws_instance" "example" { + provider = aws +} +``` + +### Filter Usage + +This example will return instances in the `stopped` state. + +```terraform +list "aws_instance" "example" { + provider = aws + + config { + filter { + name = "instance-state-name" + values = ["stopped"] + } + } +} +``` + +## Argument Reference + +This list resource supports the following arguments: + +* `filter` - (Optional) One or more filters to apply to the search. + If multiple `filter` blocks are provided, they all must be true. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. + See [`filter` Block](#filter-block) below. +* `include_auto_scaled` - (Optional) Whether to include EC2 instances that are managed by an Auto Scaling Group. + Default value is `false`. +* `region` - (Optional) [Region](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints) to query. + Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `filter` Block + +The `filter` block supports the following arguments: + +* `name` - (Required) Name of the filter. + For a full reference of filter names, see [describe-instances in the AWS CLI reference][1]. +* `values` - (Required) One or more values to match. 
+
+[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html
diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown
index 9120dc9f7f0c..8cdaf7d9bdad 100644
--- a/website/docs/r/ecs_service.html.markdown
+++ b/website/docs/r/ecs_service.html.markdown
@@ -241,6 +241,7 @@ The `lifecycle_hook` configuration block supports the following:
 * `hook_target_arn` - (Required) ARN of the Lambda function to invoke for the lifecycle hook.
 * `role_arn` - (Required) ARN of the IAM role that grants the service permission to invoke the Lambda function.
 * `lifecycle_stages` - (Required) Stages during the deployment when the hook should be invoked. Valid values: `RECONCILE_SERVICE`, `PRE_SCALE_UP`, `POST_SCALE_UP`, `TEST_TRAFFIC_SHIFT`, `POST_TEST_TRAFFIC_SHIFT`, `PRODUCTION_TRAFFIC_SHIFT`, `POST_PRODUCTION_TRAFFIC_SHIFT`.
+* `hook_details` - (Optional) Custom parameters that Amazon ECS will pass to the hook target invocations (such as a Lambda function).
 
 ### deployment_circuit_breaker
 
diff --git a/website/docs/r/odb_cloud_vm_cluster.html.markdown b/website/docs/r/odb_cloud_vm_cluster.html.markdown
new file mode 100644
index 000000000000..bf2b9dde0441
--- /dev/null
+++ b/website/docs/r/odb_cloud_vm_cluster.html.markdown
@@ -0,0 +1,152 @@
+---
+subcategory: "Oracle Database@AWS"
+layout: "aws"
+page_title: "AWS: aws_odb_cloud_vm_cluster"
+description: |-
+  Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS.
+---
+
+# Resource: aws_odb_cloud_vm_cluster
+
+Terraform resource for managing cloud vm cluster resource in AWS for Oracle Database@AWS.
+
+You can find out more about Oracle Database@AWS from [User Guide](https://docs.aws.amazon.com/odb/latest/UserGuide/what-is-odb.html).
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_odb_cloud_vm_cluster" "with_minimum_parameter" { + display_name = "my-exa-infra" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["public-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["db-server-1", "db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + data_collection_options { + is_diagnostics_events_enabled = false + is_health_monitoring_enabled = false + is_incident_logs_enabled = false + } +} + + +resource "aws_odb_cloud_vm_cluster" "with_all_parameters" { + display_name = "my-vmc" + cloud_exadata_infrastructure_id = "exa_gjrmtxl4qk" + cpu_core_count = 6 + gi_version = "23.0.0.0" + hostname_prefix = "apollo12" + ssh_public_keys = ["my-ssh-key"] + odb_network_id = "odbnet_3l9st3litg" + is_local_backup_enabled = true + is_sparse_diskgroup_enabled = true + license_model = "LICENSE_INCLUDED" + data_storage_size_in_tbs = 20.0 + db_servers = ["my-dbserver-1", "my-db-server-2"] + db_node_storage_size_in_gbs = 120.0 + memory_size_in_gbs = 60 + cluster_name = "julia-13" + timezone = "UTC" + scan_listener_port_tcp = 1521 + tags = { + "env" = "dev" + } + data_collection_options { + is_diagnostics_events_enabled = true + is_health_monitoring_enabled = true + is_incident_logs_enabled = true + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `cloud_exadata_infrastructure_id` - (Required) The unique identifier of the Exadata infrastructure for this VM cluster. Changing this will create a new resource. +* `cpu_core_count` - (Required) The number of CPU cores to enable on the VM cluster. Changing this will create a new resource. 
+* `db_servers` - (Required) The list of database servers for the VM cluster. Changing this will create a new resource. +* `display_name` - (Required) A user-friendly name for the VM cluster. Changing this will create a new resource. +* `gi_version` - (Required) A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure. Example: 19.0.0.0 Changing this will create a new resource. +* `hostname_prefix` - (Required) The host name prefix for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version". - The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. Changing this will create a new resource. +* `odb_network_id` - (Required) The unique identifier of the ODB network for the VM cluster. Changing this will create a new resource. +* `ssh_public_keys` - (Required) The public key portion of one or more key pairs used for SSH access to the VM cluster. Changing this will create a new resource. +* `data_collection_options` - (Required) The set of preferences for the various diagnostic collection options for the VM cluster. + +The following arguments are optional: + +* `cluster_name` - (Optional) The name of the Grid Infrastructure (GI) cluster. Changing this will create a new resource. +* `data_storage_size_in_tbs` - (Optional) The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster. Changing this will create a new resource. +* `db_node_storage_size_in_gbs` - (Optional) The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource. +* `is_local_backup_enabled` - (Optional) Specifies whether to enable database backups to local Exadata storage for the VM cluster. Changing this will create a new resource. 
+* `is_sparse_diskgroup_enabled` - (Optional) Specifies whether to create a sparse disk group for the VM cluster. Changing this will create a new resource.
+* `license_model` - (Optional) The Oracle license model to apply to the VM cluster. Default: LICENSE_INCLUDED. Changing this will create a new resource.
+* `memory_size_in_gbs` - (Optional) The amount of memory, in gigabytes (GBs), to allocate for the VM cluster. Changing this will create a new resource.
+* `scan_listener_port_tcp` - (Optional) The port number for TCP connections to the single client access name (SCAN) listener. Valid values: 1024–8999, except 2484, 6100, 6200, 7060, 7070, 7085, and 7879. Default: 1521. Changing this will create a new resource.
+* `timezone` - (Optional) The configured time zone of the VM cluster. Changing this will create a new resource.
+* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference).
+* `tags` - (Optional) A map of tags to assign to the cloud vm cluster. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - The Amazon Resource Name (ARN) for the cloud vm cluster.
+* `disk_redundancy` - The type of redundancy for the VM cluster: NORMAL (2-way) or HIGH (3-way).
+* `domain` - The domain name associated with the VM cluster.
+* `hostname_prefix_computed` - The host name for the VM cluster. Constraints: - Can't be "localhost" or "hostname". - Can't contain "-version".
- The maximum length of the combined hostname and domain is 63 characters. - The hostname must be unique within the subnet. This member is required. Changing this will create a new resource. +* `iorm_config_cache` - The Exadata IORM (I/O Resource Manager) configuration cache details for the VM cluster. +* `last_update_history_entry_id` - The OCID of the most recent maintenance update history entry. +* `listener_port` - The listener port number configured on the VM cluster. +* `node_count` - The total number of nodes in the VM cluster. +* `ocid` - The OCID (Oracle Cloud Identifier) of the VM cluster. +* `oci_resource_anchor_name` - The name of the OCI resource anchor associated with the VM cluster. +* `oci_url` - The HTTPS link to the VM cluster resource in OCI. +* `percent_progress` - The percentage of progress made on the current operation for the VM cluster. +* `scan_dns_name` - The fully qualified domain name (FQDN) for the SCAN IP addresses associated with the VM cluster. +* `scan_dns_record_id` - The OCID of the DNS record for the SCAN IPs linked to the VM cluster. +* `scan_ip_ids` - The list of OCIDs for SCAN IP addresses associated with the VM cluster. +* `shape` - The hardware model name of the Exadata infrastructure running the VM cluster. +* `status` - The current lifecycle status of the VM cluster. +* `status_reason` - Additional information regarding the current status of the VM cluster. +* `storage_size_in_gbs` - The local node storage allocated to the VM cluster, in gigabytes (GB). +* `system_version` - The operating system version of the image chosen for the VM cluster. +* `vip_ids` - The virtual IP (VIP) addresses assigned to the VM cluster. CRS assigns one VIP per node for failover support. +* `created_at` - The timestamp when the VM cluster was created. +* `compute_model` - The compute model used when the instance is created or cloned — either ECPU or OCPU. ECPU is a virtualized compute unit; OCPU is a physical processor core with hyper-threading. 
+* `tags_all` - The combined set of user-defined and provider-defined tags.
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `24h`)
+* `update` - (Default `24h`)
+* `delete` - (Default `24h`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import cloud vm cluster using the `id`. For example:
+
+```terraform
+import {
+  to = aws_odb_cloud_vm_cluster.example
+  id = "example"
+}
+```
+
+Using `terraform import`, import cloud vm cluster using the `id`. For example:
+
+```console
+% terraform import aws_odb_cloud_vm_cluster.example example
+```
diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown
index 8662110dc4e5..0e9e3fd0543b 100644
--- a/website/docs/r/sfn_state_machine.html.markdown
+++ b/website/docs/r/sfn_state_machine.html.markdown
@@ -209,6 +209,27 @@ This resource exports the following attributes in addition to the arguments abov
 
 ## Import
 
+In Terraform v1.12.0 and later, the [`import` block](https://developer.hashicorp.com/terraform/language/import) can be used with the `identity` attribute. For example:
+
+```terraform
+import {
+  to = aws_sfn_state_machine.example
+  identity = {
+    "arn" = "arn:aws:states:eu-west-1:123456789098:stateMachine:bar"
+  }
+}
+
+resource "aws_sfn_state_machine" "example" {
+  ### Configuration omitted for brevity ###
+}
+```
+
+### Identity Schema
+
+#### Required
+
+- `arn` (String) ARN of the state machine.
+
 In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import State Machines using the `arn`. For example:

```terraform