From f74e6d6f2864b737ae12e1e62e14086841a1d198 Mon Sep 17 00:00:00 2001 From: Modular Magician Date: Tue, 25 Jun 2024 21:48:07 +0000 Subject: [PATCH] Merge pull request #11023 from c2thorn/sync-main-FEATURE-BRANCH-6.0.0 Sync main feature branch 6.0.0 [upstream:22e1ca94afba382c6bfd496d8d1bbfb76782182b] Signed-off-by: Modular Magician --- .changelog/11023.txt | 3 + go.mod | 2 +- go.sum | 4 +- google/functions/location_from_id_test.go | 2 - google/functions/name_from_id_test.go | 2 - google/functions/project_from_id_test.go | 2 - google/functions/region_from_id_test.go | 2 - google/functions/region_from_zone.go | 18 +- google/functions/region_from_zone_test.go | 3 +- google/functions/zone_from_id_test.go | 2 - google/provider/provider_mmv1_resources.go | 5 +- ...cess_context_manager_service_perimeters.go | 18 +- ...ontext_manager_services_perimeters_test.go | 21 +- .../bigtable/resource_bigtable_gc_policy.go | 17 +- .../resource_bigtable_gc_policy_test.go | 80 +++ .../resource_cloudfunctions2_function.go | 139 ++++ .../resource_cloudfunctions2_function_test.go | 169 +++++ .../compute/resource_compute_address.go | 1 - .../resource_compute_backend_service.go | 2 - .../resource_compute_forwarding_rule.go | 3 - ...resource_compute_global_forwarding_rule.go | 3 - .../compute/resource_compute_health_check.go | 1 - .../compute/resource_compute_instance.go | 46 +- .../compute/resource_compute_instance_test.go | 143 ++++ ...esource_compute_managed_ssl_certificate.go | 1 - .../resource_compute_network_attachment.go | 623 ++++++++++++++++++ ...mpute_network_attachment_generated_test.go | 219 ++++++ ...urce_compute_network_attachment_sweeper.go | 139 ++++ ...resource_compute_region_backend_service.go | 2 - .../resource_compute_region_health_check.go | 1 - ...resource_compute_region_ssl_certificate.go | 1 - .../compute/resource_compute_reservation.go | 34 +- ..._compute_shared_reservation_update_test.go | 1 - .../resource_compute_ssl_certificate.go | 1 - 
.../resource_compute_target_https_proxy.go | 28 + ...esource_compute_target_https_proxy_test.go | 1 + .../compute/resource_compute_url_map_test.go | 1 - .../resource_data_fusion_instance.go | 148 ++++- ...rce_data_fusion_instance_generated_test.go | 82 ++- .../healthcare/resource_healthcare_dataset.go | 70 ++ ...ource_healthcare_dataset_generated_test.go | 64 ++ .../resource_identity_platform_config.go | 1 + .../logging/resource_logging_bucket_config.go | 1 + .../resource_vertex_ai_feature_group.go | 4 +- .../resource_vertex_ai_feature_group_test.go | 14 +- ...resource_vertex_ai_feature_online_store.go | 186 ++++++ ..._ai_feature_online_store_generated_test.go | 49 ++ .../resource_vmwareengine_network_sweeper.go | 161 +++-- ...urce_vmwareengine_private_cloud_sweeper.go | 139 ++++ google/tpgresource/utils.go | 11 +- ...tifact_registry_docker_image.html.markdown | 8 +- .../docs/guides/getting_started.html.markdown | 20 +- .../guides/version_6_upgrade.html.markdown | 4 +- .../docs/r/bigtable_gc_policy.html.markdown | 3 + .../r/cloudfunctions2_function.html.markdown | 170 +++++ .../docs/r/composer_environment.html.markdown | 33 +- website/docs/r/compute_address.html.markdown | 1 - .../r/compute_backend_service.html.markdown | 2 - .../r/compute_forwarding_rule.html.markdown | 3 - ...mpute_global_forwarding_rule.html.markdown | 3 - .../r/compute_http_health_check.html.markdown | 1 - .../compute_https_health_check.html.markdown | 1 - ...pute_managed_ssl_certificate.html.markdown | 1 - .../compute_network_attachment.html.markdown | 11 - ...mpute_region_backend_service.html.markdown | 2 - ...mpute_region_ssl_certificate.html.markdown | 1 - .../r/compute_ssl_certificate.html.markdown | 1 - .../compute_target_https_proxy.html.markdown | 8 + website/docs/r/compute_url_map.html.markdown | 257 ++++++++ .../r/container_aws_cluster.html.markdown | 2 +- .../r/container_aws_node_pool.html.markdown | 2 +- .../r/container_azure_client.html.markdown | 2 +- 
.../r/container_azure_cluster.html.markdown | 2 +- .../r/container_azure_node_pool.html.markdown | 2 +- .../docs/r/data_fusion_instance.html.markdown | 87 ++- .../docs/r/healthcare_dataset.html.markdown | 58 ++ .../r/vertex_ai_feature_group.html.markdown | 2 +- ...rtex_ai_feature_online_store.html.markdown | 12 +- 78 files changed, 3145 insertions(+), 224 deletions(-) create mode 100644 .changelog/11023.txt create mode 100644 google/services/compute/resource_compute_network_attachment.go create mode 100644 google/services/compute/resource_compute_network_attachment_generated_test.go create mode 100644 google/services/compute/resource_compute_network_attachment_sweeper.go create mode 100644 google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go diff --git a/.changelog/11023.txt b/.changelog/11023.txt new file mode 100644 index 00000000000..42b910df155 --- /dev/null +++ b/.changelog/11023.txt @@ -0,0 +1,3 @@ +```release-note:none + +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 491855828e3..9aeb1b3967d 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.24.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/go.sum b/go.sum index cd2fce52ae5..30534d3e3b1 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodE dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= 
-github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 h1:LIPIYi4hy7ttUSrziY/TYwMDuEvvV593n80kRmz6nZ4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= diff --git a/google/functions/location_from_id_test.go b/google/functions/location_from_id_test.go index bc5749d7649..6f0fce692dd 100644 --- a/google/functions/location_from_id_test.go +++ b/google/functions/location_from_id_test.go @@ -13,8 +13,6 @@ import ( func TestAccProviderFunction_location_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) location := "us-central1" locationRegex := regexp.MustCompile(fmt.Sprintf("^%s$", location)) diff --git a/google/functions/name_from_id_test.go b/google/functions/name_from_id_test.go index 6e35b21dff8..a1130fa6783 100644 --- a/google/functions/name_from_id_test.go +++ b/google/functions/name_from_id_test.go @@ -13,8 +13,6 @@ import ( func TestAccProviderFunction_name_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) context := map[string]interface{}{ "function_name": "name_from_id", diff --git a/google/functions/project_from_id_test.go b/google/functions/project_from_id_test.go index 1151a607452..2b0655a725b 100644 --- a/google/functions/project_from_id_test.go +++ b/google/functions/project_from_id_test.go @@ 
-14,8 +14,6 @@ import ( func TestAccProviderFunction_project_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) projectId := envvar.GetTestProjectFromEnv() projectIdRegex := regexp.MustCompile(fmt.Sprintf("^%s$", projectId)) diff --git a/google/functions/region_from_id_test.go b/google/functions/region_from_id_test.go index 5ea3c960ed7..6150f35dd72 100644 --- a/google/functions/region_from_id_test.go +++ b/google/functions/region_from_id_test.go @@ -14,8 +14,6 @@ import ( func TestAccProviderFunction_region_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) region := envvar.GetTestRegionFromEnv() regionRegex := regexp.MustCompile(fmt.Sprintf("^%s$", region)) diff --git a/google/functions/region_from_zone.go b/google/functions/region_from_zone.go index 146c205ba9f..bab7f9dbbb2 100644 --- a/google/functions/region_from_zone.go +++ b/google/functions/region_from_zone.go @@ -5,6 +5,7 @@ package functions import ( "context" "fmt" + "strings" "github.com/hashicorp/terraform-plugin-framework/function" ) @@ -37,23 +38,24 @@ func (f RegionFromZoneFunction) Definition(ctx context.Context, req function.Def func (f RegionFromZoneFunction) Run(ctx context.Context, req function.RunRequest, resp *function.RunResponse) { // Load arguments from function call - var arg0 string - resp.Error = function.ConcatFuncErrors(req.Arguments.GetArgument(ctx, 0, &arg0)) + var zone string + resp.Error = function.ConcatFuncErrors(req.Arguments.GetArgument(ctx, 0, &zone)) if resp.Error != nil { return } - if arg0 == "" { + if zone == "" { err := function.NewArgumentFuncError(0, "The input string cannot be empty.") resp.Error = function.ConcatFuncErrors(err) return } - if arg0[len(arg0)-2] != '-' { - err := 
function.NewArgumentFuncError(0, fmt.Sprintf("The input string \"%s\" is not a valid zone name.", arg0)) + zoneParts := strings.Split(zone, "-") + + if len(zoneParts) < 3 { + err := function.NewArgumentFuncError(0, fmt.Sprintf("The input string \"%s\" is not a valid zone name.", zone)) resp.Error = function.ConcatFuncErrors(err) - return + } else { + resp.Error = function.ConcatFuncErrors(resp.Result.Set(ctx, strings.Join(zoneParts[:len(zoneParts)-1], "-"))) } - - resp.Error = function.ConcatFuncErrors(resp.Result.Set(ctx, arg0[:len(arg0)-2])) } diff --git a/google/functions/region_from_zone_test.go b/google/functions/region_from_zone_test.go index cbdda8bfcc4..ad3cbd8235e 100644 --- a/google/functions/region_from_zone_test.go +++ b/google/functions/region_from_zone_test.go @@ -13,8 +13,7 @@ import ( func TestAccProviderFunction_region_from_zone(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) + projectZone := "us-central1-a" projectRegion := "us-central1" projectRegionRegex := regexp.MustCompile(fmt.Sprintf("^%s$", projectRegion)) diff --git a/google/functions/zone_from_id_test.go b/google/functions/zone_from_id_test.go index 4914995e2cd..056cafed2dc 100644 --- a/google/functions/zone_from_id_test.go +++ b/google/functions/zone_from_id_test.go @@ -14,8 +14,6 @@ import ( func TestAccProviderFunction_zone_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) zone := envvar.GetTestZoneFromEnv() zoneRegex := regexp.MustCompile(fmt.Sprintf("^%s$", zone)) diff --git a/google/provider/provider_mmv1_resources.go b/google/provider/provider_mmv1_resources.go index 9068387315c..f5be713c36a 100644 --- a/google/provider/provider_mmv1_resources.go +++ b/google/provider/provider_mmv1_resources.go @@ -414,9 +414,9 @@ var 
handwrittenIAMDatasources = map[string]*schema.Resource{ } // Resources -// Generated resources: 426 +// Generated resources: 427 // Generated IAM resources: 249 -// Total generated resources: 675 +// Total generated resources: 676 var generatedResources = map[string]*schema.Resource{ "google_folder_access_approval_settings": accessapproval.ResourceAccessApprovalFolderSettings(), "google_organization_access_approval_settings": accessapproval.ResourceAccessApprovalOrganizationSettings(), @@ -610,6 +610,7 @@ var generatedResources = map[string]*schema.Resource{ "google_compute_interconnect_attachment": compute.ResourceComputeInterconnectAttachment(), "google_compute_managed_ssl_certificate": compute.ResourceComputeManagedSslCertificate(), "google_compute_network": compute.ResourceComputeNetwork(), + "google_compute_network_attachment": compute.ResourceComputeNetworkAttachment(), "google_compute_network_endpoint": compute.ResourceComputeNetworkEndpoint(), "google_compute_network_endpoint_group": compute.ResourceComputeNetworkEndpointGroup(), "google_compute_network_endpoints": compute.ResourceComputeNetworkEndpoints(), diff --git a/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go b/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go index f0069851deb..080bc173d6c 100644 --- a/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go +++ b/google/services/accesscontextmanager/resource_access_context_manager_service_perimeters.go @@ -1099,14 +1099,13 @@ func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{} return v } l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + apiData := make([]map[string]interface{}, 0, len(l)) for _, raw := range l { original := raw.(map[string]interface{}) if len(original) < 1 { - // Do not include empty json objects coming back from the api continue } - transformed = 
append(transformed, map[string]interface{}{ + apiData = append(apiData, map[string]interface{}{ "name": flattenAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config), "title": flattenAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config), "description": flattenAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config), @@ -1118,8 +1117,19 @@ func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{} "use_explicit_dry_run_spec": flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["useExplicitDryRunSpec"], d, config), }) } - return transformed + configData := []map[string]interface{}{} + for _, item := range d.Get("service_perimeters").([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "name") + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sorted } + func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } diff --git a/google/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go b/google/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go index 54013a39a1f..8858e8f4a02 100644 --- a/google/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go +++ b/google/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go @@ -28,25 +28,28 @@ func testAccAccessContextManagerServicePerimeters_basicTest(t *testing.T) { Config: testAccAccessContextManagerServicePerimeters_basic(org, "my policy", "level", "storage_perimeter", "bigtable_perimeter", "bigquery_omni_perimeter"), }, { - ResourceName: 
"google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, { Config: testAccAccessContextManagerServicePerimeters_update(org, "my policy", "level", "storage_perimeter", "bigquery_perimeter", "bigtable_perimeter", "bigquery_omni_perimeter"), }, { - ResourceName: "google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, { Config: testAccAccessContextManagerServicePerimeters_empty(org, "my policy", "level"), }, { - ResourceName: "google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, }, }) diff --git a/google/services/bigtable/resource_bigtable_gc_policy.go b/google/services/bigtable/resource_bigtable_gc_policy.go index 2590bbd70ac..3ae29ca4eb6 100644 --- a/google/services/bigtable/resource_bigtable_gc_policy.go +++ b/google/services/bigtable/resource_bigtable_gc_policy.go @@ -218,6 +218,16 @@ func ResourceBigtableGCPolicy() *schema.Resource { in a replicated instance. Possible values are: "ABANDON".`, ValidateFunc: validation.StringInSlice([]string{"ABANDON", ""}, false), }, + + "ignore_warnings": { + Type: schema.TypeBool, + Optional: true, + Description: `Allows ignoring warnings when updating the GC policy. This can be used + to increase the gc policy on replicated clusters. 
Doing this may make clusters be + inconsistent for a longer period of time, before using this make sure you understand + the risks listed at https://cloud.google.com/bigtable/docs/garbage-collection#increasing`, + Default: false, + }, }, UseJSONNumber: true, } @@ -255,9 +265,14 @@ func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) er tableName := d.Get("table").(string) columnFamily := d.Get("column_family").(string) + ignoreWarnings := d.Get("ignore_warnings").(bool) + updateOpts := []bigtable.GCPolicyOption{} + if ignoreWarnings { + updateOpts = append(updateOpts, bigtable.IgnoreWarnings()) + } retryFunc := func() error { - reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) + reqErr := c.SetGCPolicyWithOptions(ctx, tableName, columnFamily, gcPolicy, updateOpts...) return reqErr } // The default create timeout is 20 minutes. diff --git a/google/services/bigtable/resource_bigtable_gc_policy_test.go b/google/services/bigtable/resource_bigtable_gc_policy_test.go index b955704389a..adbb0ed9b3e 100644 --- a/google/services/bigtable/resource_bigtable_gc_policy_test.go +++ b/google/services/bigtable/resource_bigtable_gc_policy_test.go @@ -41,6 +41,43 @@ func TestAccBigtableGCPolicy_basic(t *testing.T) { }) } +func TestAccBigtableGCPolicy_ignoreWarnings(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + tableName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + familyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cluster1Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cluster2Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + gcRulesOriginal := `{"rules":[{"max_age":"10h"}]}` + gcRulesNew := `{"rules":[{"max_age":"12h"}]}` + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigtableGCPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, familyName, cluster1Name, cluster2Name, gcRulesOriginal, false), + Check: resource.ComposeTestCheckFunc( + testAccBigtableGCPolicyExists(t, "google_bigtable_gc_policy.policy", true), + resource.TestCheckResourceAttr("google_bigtable_gc_policy.policy", "gc_rules", gcRulesOriginal), + ), + }, + { + Config: testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, familyName, cluster1Name, cluster2Name, gcRulesNew, true), + Check: resource.ComposeTestCheckFunc( + testAccBigtableGCPolicyExists(t, "google_bigtable_gc_policy.policy", true), + resource.TestCheckResourceAttr("google_bigtable_gc_policy.policy", "gc_rules", gcRulesNew), + ), + }, + }, + }) +} + func TestAccBigtableGCPolicy_abandoned(t *testing.T) { // bigtable instance does not use the shared HTTP client, this test creates an instance acctest.SkipIfVcr(t) @@ -565,6 +602,49 @@ resource "google_bigtable_gc_policy" "policy" { `, instanceName, instanceName, tableName, family, family) } +func testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, family string, cluster1 string, cluster2 string, gcRule string, ignoreWarnings bool) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + + cluster { + cluster_id = "%s" + num_nodes = 1 + zone = "us-central1-b" + } + + cluster { + cluster_id = "%s" + num_nodes = 1 + zone = "us-central1-c" + } + + deletion_protection = false +} + +resource "google_bigtable_table" "table" { + name = "%s" + instance_name = google_bigtable_instance.instance.id + + column_family { + family = "%s" + } +} + +resource "google_bigtable_gc_policy" "policy" { + instance_name = google_bigtable_instance.instance.id + table = google_bigtable_table.table.name + column_family = "%s" + gc_rules = < 0 && sa[0] != nil { 
+ if !isEmptyServiceAccountBlock(d) && len(sa) > 0 && sa[0] != nil { saMap := sa[0].(map[string]interface{}) req.Email = saMap["email"].(string) req.Scopes = tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(saMap["scopes"].(*schema.Set))) @@ -2862,6 +2862,11 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool // suppress changes between { } and {scopes:[]} if l[0] != nil { contents := l[0].(map[string]interface{}) + email := contents["email"] + if email != "" { + // if email is non empty, don't suppress the diff + return false + } if scopes, ok := contents["scopes"]; ok { a := scopes.(*schema.Set).List() if a != nil && len(a) > 0 { @@ -2871,3 +2876,42 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool } return true } + +// isEmptyServiceAccountBlock is used to work around an issue when updating +// service accounts. Creating the instance with some scopes but without +// specifying a service account email, assigns default compute service account +// to the instance: +// +// service_account { +// scopes = ["some-scope"] +// } +// +// Then when updating the instance with empty service account: +// +// service_account { +// scopes = [] +// } +// +// the default Terraform behavior is to clear scopes without clearing the +// email. The email was previously computed to be the default service account +// and has not been modified, so the default plan is to leave it unchanged. +// However, when creating a new instance: +// +// service_account { +// scopes = [] +// } +// +// indicates an instance without any service account set. +// isEmptyServiceAccountBlock is used to detect empty service_account block +// and if it is, it is interpreted as no service account and no scopes. 
+func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { + serviceAccountsConfig := d.GetRawConfig().GetAttr("service_account") + if serviceAccountsConfig.IsNull() || len(serviceAccountsConfig.AsValueSlice()) == 0 { + return true + } + serviceAccount := serviceAccountsConfig.AsValueSlice()[0] + if serviceAccount.GetAttr("email").IsNull() && len(serviceAccount.GetAttr("scopes").AsValueSlice()) == 0 { + return true + } + return false +} diff --git a/google/services/compute/resource_compute_instance_test.go b/google/services/compute/resource_compute_instance_test.go index 7cd607ddc5c..e13151a0c9b 100644 --- a/google/services/compute/resource_compute_instance_test.go +++ b/google/services/compute/resource_compute_instance_test.go @@ -1101,6 +1101,54 @@ func TestAccComputeInstance_serviceAccount(t *testing.T) { }) } +func TestAccComputeInstance_noServiceAccount(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_noServiceAccount(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_serviceAccountEmail_0scopes(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccountEmail_0scopes(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { t.Parallel() @@ -1117,6 +1165,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1126,6 +1175,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1135,6 +1185,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1144,6 +1195,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 3), ), }, @@ -1168,6 +1220,7 @@ func 
TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1177,6 +1230,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 1), ), }, @@ -1186,6 +1240,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -3306,6 +3361,30 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } +func testAccCheckComputeInstanceNoServiceAccount(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 0 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 0, got %d", count) + } + return nil + } +} + +func testAccCheckComputeInstanceMatchServiceAccount(instance *compute.Instance, serviceAcctRegexp string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + email := instance.ServiceAccounts[0].Email + if !regexp.MustCompile(serviceAcctRegexp).MatchString(email) { + return fmt.Errorf("ServiceAccount email didn't match:\"%s\", got \"%s\"", serviceAcctRegexp, email) + } + + return 
nil + } +} + func testAccCheckComputeInstanceScopes(instance *compute.Instance, scopeCount int) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -5277,6 +5356,70 @@ resource "google_compute_instance" "foobar" { `, instance) } +func testAccComputeInstance_noServiceAccount(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [] + } +} +`, instance) +} + +func testAccComputeInstance_serviceAccountEmail_0scopes(instance string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = [] + } +} + +data "google_compute_default_service_account" "default" { +} +`, instance) +} + func testAccComputeInstance_serviceAccount_update0(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/google/services/compute/resource_compute_managed_ssl_certificate.go b/google/services/compute/resource_compute_managed_ssl_certificate.go index d2c8b8778c7..c9451b7bb8a 100644 --- a/google/services/compute/resource_compute_managed_ssl_certificate.go +++ b/google/services/compute/resource_compute_managed_ssl_certificate.go @@ -94,7 +94,6 @@ first character must be a lowercase 
letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates.`, }, "type": { diff --git a/google/services/compute/resource_compute_network_attachment.go b/google/services/compute/resource_compute_network_attachment.go new file mode 100644 index 00000000000..6976e4e0728 --- /dev/null +++ b/google/services/compute/resource_compute_network_attachment.go @@ -0,0 +1,623 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "net/http" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceComputeNetworkAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkAttachmentCreate, + Read: resourceComputeNetworkAttachmentRead, + Delete: resourceComputeNetworkAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "connection_preference": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ACCEPT_AUTOMATIC", "ACCEPT_MANUAL", "INVALID"}), + Description: `The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. Possible values: ["ACCEPT_AUTOMATIC", "ACCEPT_MANUAL", "INVALID"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.`, + }, + "subnetworks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. Provide this property when you create the resource.`, + }, + "producer_accept_lists": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Projects that are allowed to connect to this network attachment. The project can be specified using its id or number.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "producer_reject_lists": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Projects that are not allowed to connect to this network attachment. 
The project can be specified using its id or number.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "connection_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `An array of connections for all the producers connected to this network attachment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.`, + }, + "project_id_or_num": { + Type: schema.TypeString, + Computed: true, + Description: `The project id or number of the interface to which the IP was assigned.`, + }, + "secondary_ip_cidr_ranges": { + Type: schema.TypeString, + Computed: true, + Description: `Alias IP ranges from the same subnetwork.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `The status of a connected endpoint to this network attachment.`, + }, + "subnetwork": { + Type: schema.TypeString, + Computed: true, + Description: `The subnetwork used to assign the IP to the producer instance network interface.`, + }, + }, + }, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource. A hash of the contents stored in this object. This +field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.`, + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the resource type. The server generates this identifier.`, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + Description: `Type of the resource.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the network which the Network Attachment belongs to. 
Practically it is inferred by fetching the network of the first subnetwork associated. +Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for the resource.`, + }, + "self_link_with_id": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for this resource's resource id.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeNetworkAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + connectionPreferenceProp, err := expandComputeNetworkAttachmentConnectionPreference(d.Get("connection_preference"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("connection_preference"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectionPreferenceProp)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { + obj["connectionPreference"] = connectionPreferenceProp + } + subnetworksProp, err := expandComputeNetworkAttachmentSubnetworks(d.Get("subnetworks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetworks"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworksProp)) && (ok || !reflect.DeepEqual(v, 
subnetworksProp)) { + obj["subnetworks"] = subnetworksProp + } + producerRejectListsProp, err := expandComputeNetworkAttachmentProducerRejectLists(d.Get("producer_reject_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("producer_reject_lists"); !tpgresource.IsEmptyValue(reflect.ValueOf(producerRejectListsProp)) && (ok || !reflect.DeepEqual(v, producerRejectListsProp)) { + obj["producerRejectLists"] = producerRejectListsProp + } + producerAcceptListsProp, err := expandComputeNetworkAttachmentProducerAcceptLists(d.Get("producer_accept_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("producer_accept_lists"); !tpgresource.IsEmptyValue(reflect.ValueOf(producerAcceptListsProp)) && (ok || !reflect.DeepEqual(v, producerAcceptListsProp)) { + obj["producerAcceptLists"] = producerAcceptListsProp + } + fingerprintProp, err := expandComputeNetworkAttachmentFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + nameProp, err := expandComputeNetworkAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + regionProp, err := expandComputeNetworkAttachmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new 
NetworkAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating NetworkAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkAttachment: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkAttachment %q: %#v", d.Id(), res) + + return resourceComputeNetworkAttachmentRead(d, meta) +} + +func resourceComputeNetworkAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching 
project for NetworkAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkAttachment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + + if err := d.Set("kind", flattenComputeNetworkAttachmentKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("id", flattenComputeNetworkAttachmentId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeNetworkAttachmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("description", flattenComputeNetworkAttachmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("self_link", flattenComputeNetworkAttachmentSelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("self_link_with_id", flattenComputeNetworkAttachmentSelfLinkWithId(res["selfLinkWithId"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("connection_preference", flattenComputeNetworkAttachmentConnectionPreference(res["connectionPreference"], d, config)); err != nil { + return 
fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("connection_endpoints", flattenComputeNetworkAttachmentConnectionEndpoints(res["connectionEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("subnetworks", flattenComputeNetworkAttachmentSubnetworks(res["subnetworks"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("producer_reject_lists", flattenComputeNetworkAttachmentProducerRejectLists(res["producerRejectLists"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("producer_accept_lists", flattenComputeNetworkAttachmentProducerAcceptLists(res["producerAcceptLists"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("fingerprint", flattenComputeNetworkAttachmentFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("network", flattenComputeNetworkAttachmentNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("name", flattenComputeNetworkAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("region", flattenComputeNetworkAttachmentRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + + return nil +} + +func resourceComputeNetworkAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error 
fetching project for NetworkAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + log.Printf("[DEBUG] Deleting NetworkAttachment %q", d.Id()) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + Headers: headers, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkAttachment") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting NetworkAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NetworkAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/regions/(?P[^/]+)/networkAttachments/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNetworkAttachmentKind(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSelfLinkWithId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionPreference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "status": flattenComputeNetworkAttachmentConnectionEndpointsStatus(original["status"], d, config), + "project_id_or_num": flattenComputeNetworkAttachmentConnectionEndpointsProjectIdOrNum(original["projectIdOrNum"], d, config), + "subnetwork": flattenComputeNetworkAttachmentConnectionEndpointsSubnetwork(original["subnetwork"], d, config), + "ip_address": flattenComputeNetworkAttachmentConnectionEndpointsIpAddress(original["ipAddress"], d, config), + "secondary_ip_cidr_ranges": 
flattenComputeNetworkAttachmentConnectionEndpointsSecondaryIpCidrRanges(original["secondaryIpCidrRanges"], d, config), + }) + } + return transformed +} +func flattenComputeNetworkAttachmentConnectionEndpointsStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsProjectIdOrNum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsSecondaryIpCidrRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeNetworkAttachmentProducerRejectLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentProducerAcceptLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} 
+ +func flattenComputeNetworkAttachmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeNetworkAttachmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentConnectionPreference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for subnetworks: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetworks: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeNetworkAttachmentProducerRejectLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentProducerAcceptLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentRegion(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/google/services/compute/resource_compute_network_attachment_generated_test.go b/google/services/compute/resource_compute_network_attachment_generated_test.go new file mode 100644 index 00000000000..a2c134114bd --- /dev/null +++ b/google/services/compute/resource_compute_network_attachment_generated_test.go @@ -0,0 +1,219 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeNetworkAttachment_networkAttachmentBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkAttachmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkAttachment_networkAttachmentBasicExample(context), + }, + { + ResourceName: "google_compute_network_attachment.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccComputeNetworkAttachment_networkAttachmentBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_attachment" "default" { + name = "tf-test-basic-network-attachment%{random_suffix}" + region = "us-central1" + description = "basic network attachment description" + connection_preference = "ACCEPT_MANUAL" + + subnetworks = [ + google_compute_subnetwork.default.self_link + ] + + producer_accept_lists = [ + google_project.accepted_producer_project.project_id + ] + + producer_reject_lists = [ + 
google_project.rejected_producer_project.project_id + ] +} + +resource "google_compute_network" "default" { + name = "tf-test-basic-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-basic-subnetwork%{random_suffix}" + region = "us-central1" + + network = google_compute_network.default.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_project" "rejected_producer_project" { + project_id = "tf-test-prj-rejected%{random_suffix}" + name = "tf-test-prj-rejected%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project" "accepted_producer_project" { + project_id = "tf-test-prj-accepted%{random_suffix}" + name = "tf-test-prj-accepted%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +`, context) +} + +func TestAccComputeNetworkAttachment_networkAttachmentInstanceUsageExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkAttachmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkAttachment_networkAttachmentInstanceUsageExample(context), + }, + { + ResourceName: "google_compute_network_attachment.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccComputeNetworkAttachment_networkAttachmentInstanceUsageExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "tf-test-basic-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-basic-subnetwork%{random_suffix}" + 
region = "us-central1" + + network = google_compute_network.default.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "default" { + name = "tf-test-basic-network-attachment%{random_suffix}" + region = "us-central1" + description = "my basic network attachment" + + subnetworks = [google_compute_subnetwork.default.id] + connection_preference = "ACCEPT_AUTOMATIC" +} + +resource "google_compute_instance" "default" { + name = "tf-test-basic-instance%{random_suffix}" + zone = "us-central1-a" + machine_type = "e2-micro" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network = "default" + } + + network_interface { + network_attachment = google_compute_network_attachment.default.self_link + } +} +`, context) +} + +func testAccCheckComputeNetworkAttachmentDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_network_attachment" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeNetworkAttachment still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/services/compute/resource_compute_network_attachment_sweeper.go b/google/services/compute/resource_compute_network_attachment_sweeper.go new file mode 100644 index 00000000000..ef9b0d56e4e --- /dev/null 
+++ b/google/services/compute/resource_compute_network_attachment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkAttachment", testSweepComputeNetworkAttachment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetworkAttachment(region string) error { + resourceName := "ComputeNetworkAttachment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, 
+ "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/networkAttachments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/services/compute/resource_compute_region_backend_service.go b/google/services/compute/resource_compute_region_backend_service.go index 590d84a1335..07dac285864 100644 --- a/google/services/compute/resource_compute_region_backend_service.go +++ b/google/services/compute/resource_compute_region_backend_service.go @@ -641,7 +641,6 @@ The possible values are: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. 
- This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, @@ -651,7 +650,6 @@ This field is applicable to either: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. diff --git a/google/services/compute/resource_compute_region_health_check.go b/google/services/compute/resource_compute_region_health_check.go index 23bb04154d7..9cdb5daaaa1 100644 --- a/google/services/compute/resource_compute_region_health_check.go +++ b/google/services/compute/resource_compute_region_health_check.go @@ -2074,7 +2074,6 @@ func expandComputeRegionHealthCheckRegion(v interface{}, d tpgresource.Terraform } func resourceComputeRegionHealthCheckEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if _, ok := d.GetOk("http_health_check"); ok { hc := d.Get("http_health_check").([]interface{})[0] ps := hc.(map[string]interface{})["port_specification"] diff --git a/google/services/compute/resource_compute_region_ssl_certificate.go b/google/services/compute/resource_compute_region_ssl_certificate.go index 7d220be9921..d4e824e9887 100644 --- a/google/services/compute/resource_compute_region_ssl_certificate.go +++ b/google/services/compute/resource_compute_region_ssl_certificate.go @@ -90,7 +90,6 @@ first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
- These are in the same namespace as the managed SSL certificates.`, }, "region": { diff --git a/google/services/compute/resource_compute_reservation.go b/google/services/compute/resource_compute_reservation.go index e24540bc258..5152a49ae77 100644 --- a/google/services/compute/resource_compute_reservation.go +++ b/google/services/compute/resource_compute_reservation.go @@ -213,6 +213,7 @@ for information on available CPU platforms.`, Type: schema.TypeString, Computed: true, Optional: true, + ForceNew: true, ValidateFunc: verify.ValidateEnum([]string{"LOCAL", "SPECIFIC_PROJECTS", ""}), Description: `Type of sharing for this shared-reservation Possible values: ["LOCAL", "SPECIFIC_PROJECTS"]`, }, @@ -1128,10 +1129,27 @@ func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf // Set project_map. projectMap := make(map[string]interface{}) old, new := d.GetChange("share_settings") - oldMap := old.([]interface{})[0].(map[string]interface{})["project_map"] - newMap := new.([]interface{})[0].(map[string]interface{})["project_map"] - before := oldMap.(*schema.Set) - after := newMap.(*schema.Set) + + var before *schema.Set + if oldSlice, ok := old.([]interface{}); ok && len(oldSlice) > 0 { + if oldMap, ok := oldSlice[0].(map[string]interface{})["project_map"]; ok { + before = oldMap.(*schema.Set) + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + var after *schema.Set + if newSlice, ok := new.([]interface{}); ok && len(newSlice) > 0 { + if newMap, ok := newSlice[0].(map[string]interface{})["project_map"]; ok { + after = newMap.(*schema.Set) + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } for _, raw := range after.Difference(before).List() { original := raw.(map[string]interface{}) @@ -1147,10 +1165,10 @@ func 
resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf } projectMap[transformedId] = singleProject // add added projects to updateMask - if firstProject != true { - maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["project_id"]) + if !firstProject { + maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["id"]) } else { - maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["project_id"]) + maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["id"]) firstProject = false } decodedPath, _ := url.QueryUnescape(maskId) @@ -1177,7 +1195,7 @@ func resourceComputeReservationUpdateEncoder(d *schema.ResourceData, meta interf projectNum := project.ProjectNumber projectIdOrNum = fmt.Sprintf("%d", projectNum) } - if firstProject != true { + if !firstProject { maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", projectIdOrNum) } else { maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", projectIdOrNum) diff --git a/google/services/compute/resource_compute_shared_reservation_update_test.go b/google/services/compute/resource_compute_shared_reservation_update_test.go index 82795badf01..98a59bb1f91 100644 --- a/google/services/compute/resource_compute_shared_reservation_update_test.go +++ b/google/services/compute/resource_compute_shared_reservation_update_test.go @@ -11,7 +11,6 @@ import ( ) func TestAccComputeSharedReservation_update(t *testing.T) { - acctest.SkipIfVcr(t) // large number of parallel resources. 
t.Parallel() context := map[string]interface{}{ diff --git a/google/services/compute/resource_compute_ssl_certificate.go b/google/services/compute/resource_compute_ssl_certificate.go index d6e8857280f..936c060c935 100644 --- a/google/services/compute/resource_compute_ssl_certificate.go +++ b/google/services/compute/resource_compute_ssl_certificate.go @@ -90,7 +90,6 @@ first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates.`, }, "certificate_id": { diff --git a/google/services/compute/resource_compute_target_https_proxy.go b/google/services/compute/resource_compute_target_https_proxy.go index b72a16c0f23..7a4ebe74f7e 100644 --- a/google/services/compute/resource_compute_target_https_proxy.go +++ b/google/services/compute/resource_compute_target_https_proxy.go @@ -166,6 +166,17 @@ sslCertificates and certificateManagerCertificates can not be defined together.` Description: `A reference to the SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured.`, + }, + "tls_early_data": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"STRICT", "PERMISSIVE", "DISABLED", ""}), + Description: `Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. +Early Data allows a TLS resumption handshake to include the initial application payload +(a HTTP request) alongside the handshake, reducing the effective round trips to “zero”. +This applies to TLS 1.3 connections over TCP (HTTP/2) as well as over UDP (QUIC/h3). 
Possible values: ["STRICT", "PERMISSIVE", "DISABLED"]`, }, "creation_timestamp": { Type: schema.TypeString, @@ -218,6 +229,12 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac } else if v, ok := d.GetOkExists("quic_override"); !tpgresource.IsEmptyValue(reflect.ValueOf(quicOverrideProp)) && (ok || !reflect.DeepEqual(v, quicOverrideProp)) { obj["quicOverride"] = quicOverrideProp } + tlsEarlyDataProp, err := expandComputeTargetHttpsProxyTlsEarlyData(d.Get("tls_early_data"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tls_early_data"); !tpgresource.IsEmptyValue(reflect.ValueOf(tlsEarlyDataProp)) && (ok || !reflect.DeepEqual(v, tlsEarlyDataProp)) { + obj["tlsEarlyData"] = tlsEarlyDataProp + } certificateManagerCertificatesProp, err := expandComputeTargetHttpsProxyCertificateManagerCertificates(d.Get("certificate_manager_certificates"), d, config) if err != nil { return err @@ -397,6 +414,9 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ if err := d.Set("quic_override", flattenComputeTargetHttpsProxyQuicOverride(res["quicOverride"], d, config)); err != nil { return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) } + if err := d.Set("tls_early_data", flattenComputeTargetHttpsProxyTlsEarlyData(res["tlsEarlyData"], d, config)); err != nil { + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } if err := d.Set("certificate_manager_certificates", flattenComputeTargetHttpsProxyCertificateManagerCertificates(res["certificateManagerCertificates"], d, config)); err != nil { return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) } @@ -820,6 +840,10 @@ func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *schema.Resourc return v } +func flattenComputeTargetHttpsProxyTlsEarlyData(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenComputeTargetHttpsProxyCertificateManagerCertificates(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -889,6 +913,10 @@ func expandComputeTargetHttpsProxyQuicOverride(v interface{}, d tpgresource.Terr return v, nil } +func expandComputeTargetHttpsProxyTlsEarlyData(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandComputeTargetHttpsProxyCertificateManagerCertificates(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/google/services/compute/resource_compute_target_https_proxy_test.go b/google/services/compute/resource_compute_target_https_proxy_test.go index ad59b3634fd..af3ee764580 100644 --- a/google/services/compute/resource_compute_target_https_proxy_test.go +++ b/google/services/compute/resource_compute_target_https_proxy_test.go @@ -223,6 +223,7 @@ resource "google_compute_target_https_proxy" "foobar" { google_compute_ssl_certificate.foobar2.self_link, ] quic_override = "ENABLE" + tls_early_data = "STRICT" } resource "google_compute_backend_service" "foobar" { diff --git a/google/services/compute/resource_compute_url_map_test.go b/google/services/compute/resource_compute_url_map_test.go index dc2450068f5..063f9d62512 100644 --- a/google/services/compute/resource_compute_url_map_test.go +++ b/google/services/compute/resource_compute_url_map_test.go @@ -350,7 +350,6 @@ func TestAccComputeUrlMap_defaultUrlRedirect(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix), diff --git a/google/services/datafusion/resource_data_fusion_instance.go b/google/services/datafusion/resource_data_fusion_instance.go index 
a0370f93254..939f4416594 100644 --- a/google/services/datafusion/resource_data_fusion_instance.go +++ b/google/services/datafusion/resource_data_fusion_instance.go @@ -215,21 +215,65 @@ Please refer to the field 'effective_labels' for all of the labels present on th MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "connection_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"VPC_PEERING", "PRIVATE_SERVICE_CONNECT_INTERFACES", ""}), + Description: `Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and +the corresponding tenant project from a predefined list of available connection modes. +If this field is unspecified for a private instance, VPC peering is used. Possible values: ["VPC_PEERING", "PRIVATE_SERVICE_CONNECT_INTERFACES"]`, + }, "ip_allocation": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Description: `The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network.`, }, "network": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, Description: `Name of the network in the project with which the tenant project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network}`, }, + "private_service_connect_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Optional. Configuration for Private Service Connect. 
+This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_attachment": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. The reference to the network attachment used to establish private connectivity. +It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. +This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES.`, + }, + "unreachable_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. +The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. +This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. +If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632.`, + }, + "effective_unreachable_cidr_block": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. +The size of this block is /25. 
The format of this field is governed by RFC 4632.`, + }, + }, + }, + }, }, }, }, @@ -961,6 +1005,10 @@ func flattenDataFusionInstanceNetworkConfig(v interface{}, d *schema.ResourceDat flattenDataFusionInstanceNetworkConfigIpAllocation(original["ipAllocation"], d, config) transformed["network"] = flattenDataFusionInstanceNetworkConfigNetwork(original["network"], d, config) + transformed["connection_type"] = + flattenDataFusionInstanceNetworkConfigConnectionType(original["connectionType"], d, config) + transformed["private_service_connect_config"] = + flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfig(original["privateServiceConnectConfig"], d, config) return []interface{}{transformed} } func flattenDataFusionInstanceNetworkConfigIpAllocation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -971,6 +1019,39 @@ func flattenDataFusionInstanceNetworkConfigNetwork(v interface{}, d *schema.Reso return v } +func flattenDataFusionInstanceNetworkConfigConnectionType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["network_attachment"] = + flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigNetworkAttachment(original["networkAttachment"], d, config) + transformed["unreachable_cidr_block"] = + flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigUnreachableCidrBlock(original["unreachableCidrBlock"], d, config) + transformed["effective_unreachable_cidr_block"] = + flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigEffectiveUnreachableCidrBlock(original["effectiveUnreachableCidrBlock"], d, config) + return 
[]interface{}{transformed} +} +func flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigNetworkAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigUnreachableCidrBlock(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("network_config.0.private_service_connect_config.0.unreachable_cidr_block") +} + +func flattenDataFusionInstanceNetworkConfigPrivateServiceConnectConfigEffectiveUnreachableCidrBlock(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenDataFusionInstanceZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } @@ -1143,6 +1224,20 @@ func expandDataFusionInstanceNetworkConfig(v interface{}, d tpgresource.Terrafor transformed["network"] = transformedNetwork } + transformedConnectionType, err := expandDataFusionInstanceNetworkConfigConnectionType(original["connection_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnectionType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["connectionType"] = transformedConnectionType + } + + transformedPrivateServiceConnectConfig, err := expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfig(original["private_service_connect_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateServiceConnectConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateServiceConnectConfig"] = transformedPrivateServiceConnectConfig + } + return transformed, nil } @@ -1154,6 +1249,55 @@ func expandDataFusionInstanceNetworkConfigNetwork(v interface{}, d tpgresource.T return v, nil } +func expandDataFusionInstanceNetworkConfigConnectionType(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNetworkAttachment, err := expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigNetworkAttachment(original["network_attachment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetworkAttachment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkAttachment"] = transformedNetworkAttachment + } + + transformedUnreachableCidrBlock, err := expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigUnreachableCidrBlock(original["unreachable_cidr_block"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUnreachableCidrBlock); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["unreachableCidrBlock"] = transformedUnreachableCidrBlock + } + + transformedEffectiveUnreachableCidrBlock, err := expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigEffectiveUnreachableCidrBlock(original["effective_unreachable_cidr_block"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEffectiveUnreachableCidrBlock); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["effectiveUnreachableCidrBlock"] = transformedEffectiveUnreachableCidrBlock + } + + return transformed, nil +} + +func expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigNetworkAttachment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigUnreachableCidrBlock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataFusionInstanceNetworkConfigPrivateServiceConnectConfigEffectiveUnreachableCidrBlock(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandDataFusionInstanceZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } diff --git a/google/services/datafusion/resource_data_fusion_instance_generated_test.go b/google/services/datafusion/resource_data_fusion_instance_generated_test.go index ecc5bfd11ab..d0bd0ebfc64 100644 --- a/google/services/datafusion/resource_data_fusion_instance_generated_test.go +++ b/google/services/datafusion/resource_data_fusion_instance_generated_test.go @@ -50,7 +50,7 @@ func TestAccDataFusionInstance_dataFusionInstanceBasicExample(t *testing.T) { ResourceName: "google_data_fusion_instance.basic_instance", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) @@ -87,7 +87,7 @@ func TestAccDataFusionInstance_dataFusionInstanceFullExample(t *testing.T) { ResourceName: "google_data_fusion_instance.extended_instance", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) @@ -139,6 +139,76 @@ resource "google_compute_global_address" "private_ip_alloc" { `, context) } +func TestAccDataFusionInstance_dataFusionInstancePscExample(t 
*testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "prober_test_run": `options = { prober_test_run = "true" }`, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataFusionInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataFusionInstance_dataFusionInstancePscExample(context), + }, + { + ResourceName: "google_data_fusion_instance.psc_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, + }, + }, + }) +} + +func testAccDataFusionInstance_dataFusionInstancePscExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_data_fusion_instance" "psc_instance" { + name = "tf-test-psc-instance%{random_suffix}" + region = "us-central1" + type = "BASIC" + private_instance = true + + network_config { + connection_type = "PRIVATE_SERVICE_CONNECT_INTERFACES" + private_service_connect_config { + network_attachment = google_compute_network_attachment.psc.id + unreachable_cidr_block = "192.168.0.0/25" + } + } + + %{prober_test_run} +} + +resource "google_compute_network" "psc" { + name = "tf-test-datafusion-psc-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc" { + name = "tf-test-datafusion-psc-subnet%{random_suffix}" + region = "us-central1" + + network = google_compute_network.psc.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "psc" { + name = "tf-test-datafusion-psc-attachment%{random_suffix}" + region = "us-central1" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.psc.self_link + ] +} +`, context) +} + func 
TestAccDataFusionInstance_dataFusionInstanceCmekExample(t *testing.T) { t.Parallel() @@ -158,7 +228,7 @@ func TestAccDataFusionInstance_dataFusionInstanceCmekExample(t *testing.T) { ResourceName: "google_data_fusion_instance.cmek", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) @@ -219,7 +289,7 @@ func TestAccDataFusionInstance_dataFusionInstanceEnterpriseExample(t *testing.T) ResourceName: "google_data_fusion_instance.enterprise_instance", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) @@ -256,7 +326,7 @@ func TestAccDataFusionInstance_dataFusionInstanceEventExample(t *testing.T) { ResourceName: "google_data_fusion_instance.event", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) @@ -300,7 +370,7 @@ func TestAccDataFusionInstance_dataFusionInstanceZoneExample(t *testing.T) { ResourceName: "google_data_fusion_instance.zone", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"labels", "region", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"labels", "network_config.0.private_service_connect_config.0.unreachable_cidr_block", "region", "terraform_labels"}, }, }, }) diff --git a/google/services/healthcare/resource_healthcare_dataset.go b/google/services/healthcare/resource_healthcare_dataset.go index 1d0aa485393..c424e7dbb3d 100644 --- 
a/google/services/healthcare/resource_healthcare_dataset.go +++ b/google/services/healthcare/resource_healthcare_dataset.go @@ -66,6 +66,27 @@ func ResourceHealthcareDataset() *schema.Resource { ForceNew: true, Description: `The resource name for the Dataset.`, }, + "encryption_spec": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: `A nested object resource`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `KMS encryption key that is used to secure this dataset and its sub-resources. The key used for +encryption and the dataset must be in the same location. If empty, the default Google encryption +key will be used to secure this dataset. The format is +projects/{projectId}/locations/{locationId}/keyRings/{keyRingId}/cryptoKeys/{keyId}.`, + }, + }, + }, + }, "time_zone": { Type: schema.TypeString, Computed: true, @@ -110,6 +131,12 @@ func resourceHealthcareDatasetCreate(d *schema.ResourceData, meta interface{}) e } else if v, ok := d.GetOkExists("time_zone"); !tpgresource.IsEmptyValue(reflect.ValueOf(timeZoneProp)) && (ok || !reflect.DeepEqual(v, timeZoneProp)) { obj["timeZone"] = timeZoneProp } + encryptionSpecProp, err := expandHealthcareDatasetEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_spec"); !tpgresource.IsEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } url, err := tpgresource.ReplaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}") if err != nil { @@ -219,6 +246,9 @@ func resourceHealthcareDatasetRead(d *schema.ResourceData, meta interface{}) err if err := d.Set("time_zone", flattenHealthcareDatasetTimeZone(res["timeZone"], d, config)); 
err != nil { return fmt.Errorf("Error reading Dataset: %s", err) } + if err := d.Set("encryption_spec", flattenHealthcareDatasetEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Dataset: %s", err) + } return nil } @@ -372,6 +402,23 @@ func flattenHealthcareDatasetTimeZone(v interface{}, d *schema.ResourceData, con return v } +func flattenHealthcareDatasetEncryptionSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenHealthcareDatasetEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenHealthcareDatasetEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func expandHealthcareDatasetName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } @@ -380,6 +427,29 @@ func expandHealthcareDatasetTimeZone(v interface{}, d tpgresource.TerraformResou return v, nil } +func expandHealthcareDatasetEncryptionSpec(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandHealthcareDatasetEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandHealthcareDatasetEncryptionSpecKmsKeyName(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func resourceHealthcareDatasetDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { // Take the returned long form of the name and use it as `self_link`. // Then modify the name to be the user specified form. diff --git a/google/services/healthcare/resource_healthcare_dataset_generated_test.go b/google/services/healthcare/resource_healthcare_dataset_generated_test.go index 95d0120f393..1cf08d51e2c 100644 --- a/google/services/healthcare/resource_healthcare_dataset_generated_test.go +++ b/google/services/healthcare/resource_healthcare_dataset_generated_test.go @@ -65,6 +65,70 @@ resource "google_healthcare_dataset" "default" { `, context) } +func TestAccHealthcareDataset_healthcareDatasetCmekExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckHealthcareDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccHealthcareDataset_healthcareDatasetCmekExample(context), + }, + { + ResourceName: "google_healthcare_dataset.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "self_link"}, + }, + }, + }) +} + +func testAccHealthcareDataset_healthcareDatasetCmekExample(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_healthcare_dataset" "default" { + name = "tf-test-example-dataset%{random_suffix}" + location = "us-central1" + time_zone = "UTC" + + encryption_spec { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + 
google_kms_crypto_key_iam_binding.healthcare_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "tf-test-example-key%{random_suffix}" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "tf-test-example-keyring%{random_suffix}" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_binding" "healthcare_cmek_keyuser" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + ] +} +`, context) +} + func testAccCheckHealthcareDatasetDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/google/services/identityplatform/resource_identity_platform_config.go b/google/services/identityplatform/resource_identity_platform_config.go index ce59663d025..a5bc861d092 100644 --- a/google/services/identityplatform/resource_identity_platform_config.go +++ b/google/services/identityplatform/resource_identity_platform_config.go @@ -421,6 +421,7 @@ email/password or email link.`, }, "sms_region_config": { Type: schema.TypeList, + Computed: true, Optional: true, Description: `Configures the regions where users are allowed to send verification SMS for the project or tenant. 
This is based on the calling code of the destination phone number.`, MaxItems: 1, diff --git a/google/services/logging/resource_logging_bucket_config.go b/google/services/logging/resource_logging_bucket_config.go index 336d3970e20..e0d33e82a07 100644 --- a/google/services/logging/resource_logging_bucket_config.go +++ b/google/services/logging/resource_logging_bucket_config.go @@ -95,6 +95,7 @@ See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/ro Type: schema.TypeSet, MaxItems: 20, Optional: true, + Computed: true, Description: `A list of indexed fields and related configuration data.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ diff --git a/google/services/vertexai/resource_vertex_ai_feature_group.go b/google/services/vertexai/resource_vertex_ai_feature_group.go index 0ed91b1e239..75303e2abb4 100644 --- a/google/services/vertexai/resource_vertex_ai_feature_group.go +++ b/google/services/vertexai/resource_vertex_ai_feature_group.go @@ -81,7 +81,7 @@ func ResourceVertexAIFeatureGroup() *schema.Resource { "entity_id_columns": { Type: schema.TypeList, Optional: true, - Description: `Columns to construct entityId / row keys. Currently only supports 1 entity_id_column. If not provided defaults to entityId.`, + Description: `Columns to construct entityId / row keys. 
If not provided defaults to entityId.`, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -385,7 +385,7 @@ func resourceVertexAIFeatureGroupUpdate(d *schema.ResourceData, meta interface{} } if d.HasChange("big_query") { - updateMask = append(updateMask, "bigQuery") + updateMask = append(updateMask, "bigQuery.entityIdColumns") } if d.HasChange("effective_labels") { diff --git a/google/services/vertexai/resource_vertex_ai_feature_group_test.go b/google/services/vertexai/resource_vertex_ai_feature_group_test.go index c0775e0fab5..2647f2a9acc 100644 --- a/google/services/vertexai/resource_vertex_ai_feature_group_test.go +++ b/google/services/vertexai/resource_vertex_ai_feature_group_test.go @@ -80,6 +80,12 @@ resource "google_bigquery_table" "sample_table" { "type": "STRING", "mode": "NULLABLE" }, + { + "name": "test_entity_column", + "type": "STRING", + "mode": "NULLABLE", + "description": "test secondary entity column" + }, { "name": "feature_timestamp", "type": "TIMESTAMP", @@ -105,7 +111,7 @@ func testAccVertexAIFeatureGroup_updated(context map[string]interface{}) string big_query_source { input_uri = "bq://${google_bigquery_table.sample_table.project}.${google_bigquery_table.sample_table.dataset_id}.${google_bigquery_table.sample_table.table_id}" } - entity_id_columns = ["feature_id"] + entity_id_columns = ["feature_id","test_entity_column"] } } @@ -128,6 +134,12 @@ resource "google_bigquery_table" "sample_table" { "type": "STRING", "mode": "NULLABLE" }, + { + "name": "test_entity_column", + "type": "STRING", + "mode": "NULLABLE", + "description": "test secondary entity column" + }, { "name": "feature_timestamp", "type": "TIMESTAMP", diff --git a/google/services/vertexai/resource_vertex_ai_feature_online_store.go b/google/services/vertexai/resource_vertex_ai_feature_online_store.go index 0cd56e46216..45700a0a098 100644 --- a/google/services/vertexai/resource_vertex_ai_feature_online_store.go +++ 
b/google/services/vertexai/resource_vertex_ai_feature_online_store.go @@ -98,6 +98,50 @@ func ResourceVertexAIFeatureOnlineStore() *schema.Resource { }, ExactlyOneOf: []string{"bigtable", "optimized"}, }, + "dedicated_serving_endpoint": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to be set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "private_service_connect_config": { + Type: schema.TypeList, + Optional: true, + Description: `Private service connect config.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_private_service_connect": { + Type: schema.TypeBool, + Required: true, + Description: `If set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint.`, + }, + "project_allowlist": { + Type: schema.TypeList, + Optional: true, + Description: `A list of Projects from which the forwarding rule will target the service attachment.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "public_endpoint_domain_name": { + Type: schema.TypeString, + Computed: true, + Description: `Domain name to use for this FeatureOnlineStore`, + }, + "service_attachment": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the service attachment resource. 
Applicable only if private service connect is enabled and after FeatureViewSync is created.`, + }, + }, + }, + }, "labels": { Type: schema.TypeMap, Optional: true, @@ -195,6 +239,12 @@ func resourceVertexAIFeatureOnlineStoreCreate(d *schema.ResourceData, meta inter } else if v, ok := d.GetOkExists("optimized"); ok || !reflect.DeepEqual(v, optimizedProp) { obj["optimized"] = optimizedProp } + dedicatedServingEndpointProp, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpoint(d.Get("dedicated_serving_endpoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_serving_endpoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(dedicatedServingEndpointProp)) && (ok || !reflect.DeepEqual(v, dedicatedServingEndpointProp)) { + obj["dedicatedServingEndpoint"] = dedicatedServingEndpointProp + } labelsProp, err := expandVertexAIFeatureOnlineStoreEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -334,6 +384,9 @@ func resourceVertexAIFeatureOnlineStoreRead(d *schema.ResourceData, meta interfa if err := d.Set("optimized", flattenVertexAIFeatureOnlineStoreOptimized(res["optimized"], d, config)); err != nil { return fmt.Errorf("Error reading FeatureOnlineStore: %s", err) } + if err := d.Set("dedicated_serving_endpoint", flattenVertexAIFeatureOnlineStoreDedicatedServingEndpoint(res["dedicatedServingEndpoint"], d, config)); err != nil { + return fmt.Errorf("Error reading FeatureOnlineStore: %s", err) + } if err := d.Set("terraform_labels", flattenVertexAIFeatureOnlineStoreTerraformLabels(res["labels"], d, config)); err != nil { return fmt.Errorf("Error reading FeatureOnlineStore: %s", err) } @@ -372,6 +425,12 @@ func resourceVertexAIFeatureOnlineStoreUpdate(d *schema.ResourceData, meta inter } else if v, ok := d.GetOkExists("optimized"); ok || !reflect.DeepEqual(v, optimizedProp) { obj["optimized"] = optimizedProp } + dedicatedServingEndpointProp, err := 
expandVertexAIFeatureOnlineStoreDedicatedServingEndpoint(d.Get("dedicated_serving_endpoint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("dedicated_serving_endpoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, dedicatedServingEndpointProp)) { + obj["dedicatedServingEndpoint"] = dedicatedServingEndpointProp + } labelsProp, err := expandVertexAIFeatureOnlineStoreEffectiveLabels(d.Get("effective_labels"), d, config) if err != nil { return err @@ -396,6 +455,10 @@ func resourceVertexAIFeatureOnlineStoreUpdate(d *schema.ResourceData, meta inter updateMask = append(updateMask, "optimized") } + if d.HasChange("dedicated_serving_endpoint") { + updateMask = append(updateMask, "dedicatedServingEndpoint") + } + if d.HasChange("effective_labels") { updateMask = append(updateMask, "labels") } @@ -647,6 +710,54 @@ func flattenVertexAIFeatureOnlineStoreOptimized(v interface{}, d *schema.Resourc return []interface{}{transformed} } +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpoint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["public_endpoint_domain_name"] = + flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPublicEndpointDomainName(original["publicEndpointDomainName"], d, config) + transformed["service_attachment"] = + flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointServiceAttachment(original["serviceAttachment"], d, config) + transformed["private_service_connect_config"] = + flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig(original["privateServiceConnectConfig"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPublicEndpointDomainName(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointServiceAttachment(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_private_service_connect"] = + flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigEnablePrivateServiceConnect(original["enablePrivateServiceConnect"], d, config) + transformed["project_allowlist"] = + flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigProjectAllowlist(original["projectAllowlist"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigEnablePrivateServiceConnect(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigProjectAllowlist(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenVertexAIFeatureOnlineStoreTerraformLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v @@ -745,6 +856,81 @@ func expandVertexAIFeatureOnlineStoreOptimized(v interface{}, d tpgresource.Terr return transformed, nil } +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + 
raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPublicEndpointDomainName, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPublicEndpointDomainName(original["public_endpoint_domain_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublicEndpointDomainName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["publicEndpointDomainName"] = transformedPublicEndpointDomainName + } + + transformedServiceAttachment, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpointServiceAttachment(original["service_attachment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAttachment); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAttachment"] = transformedServiceAttachment + } + + transformedPrivateServiceConnectConfig, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig(original["private_service_connect_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateServiceConnectConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateServiceConnectConfig"] = transformedPrivateServiceConnectConfig + } + + return transformed, nil +} + +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPublicEndpointDomainName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpointServiceAttachment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l 
:= v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnablePrivateServiceConnect, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigEnablePrivateServiceConnect(original["enable_private_service_connect"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnablePrivateServiceConnect); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enablePrivateServiceConnect"] = transformedEnablePrivateServiceConnect + } + + transformedProjectAllowlist, err := expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigProjectAllowlist(original["project_allowlist"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProjectAllowlist); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["projectAllowlist"] = transformedProjectAllowlist + } + + return transformed, nil +} + +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigEnablePrivateServiceConnect(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIFeatureOnlineStoreDedicatedServingEndpointPrivateServiceConnectConfigProjectAllowlist(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func expandVertexAIFeatureOnlineStoreEffectiveLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { if v == nil { return map[string]string{}, nil diff --git a/google/services/vertexai/resource_vertex_ai_feature_online_store_generated_test.go b/google/services/vertexai/resource_vertex_ai_feature_online_store_generated_test.go index 1974d4d02f4..cfffd693143 100644 --- 
a/google/services/vertexai/resource_vertex_ai_feature_online_store_generated_test.go +++ b/google/services/vertexai/resource_vertex_ai_feature_online_store_generated_test.go @@ -74,6 +74,55 @@ resource "google_vertex_ai_feature_online_store" "feature_online_store" { `, context) } +func TestAccVertexAIFeatureOnlineStore_vertexAiFeatureonlinestoreWithOptimizedExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIFeatureOnlineStoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIFeatureOnlineStore_vertexAiFeatureonlinestoreWithOptimizedExample(context), + }, + { + ResourceName: "google_vertex_ai_feature_online_store.featureonlinestore", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "force_destroy", "labels", "name", "region", "terraform_labels"}, + }, + }, + }) +} + +func testAccVertexAIFeatureOnlineStore_vertexAiFeatureonlinestoreWithOptimizedExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_feature_online_store" "featureonlinestore" { + provider = google + name = "tf_test_example_feature_online_store_optimized%{random_suffix}" + labels = { + foo = "bar" + } + region = "us-central1" + optimized {} + dedicated_serving_endpoint { + private_service_connect_config { + enable_private_service_connect = true + project_allowlist = [data.google_project.project.number] + } + } +} + +data "google_project" "project" { + provider = google +} +`, context) +} + func testAccCheckVertexAIFeatureOnlineStoreDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git 
a/google/services/vmwareengine/resource_vmwareengine_network_sweeper.go b/google/services/vmwareengine/resource_vmwareengine_network_sweeper.go index 2fdd029a475..0b01be30034 100644 --- a/google/services/vmwareengine/resource_vmwareengine_network_sweeper.go +++ b/google/services/vmwareengine/resource_vmwareengine_network_sweeper.go @@ -1,20 +1,5 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: MMv1 *** -// -// ---------------------------------------------------------------------------- -// -// This file is automatically generated by Magic Modules and manual -// changes will be clobbered when the file is regenerated. -// -// Please read more about how to change this file in -// .github/CONTRIBUTING.md. -// -// ---------------------------------------------------------------------------- - package vmwareengine import ( @@ -53,86 +38,96 @@ func testSweepVmwareengineNetwork(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", 
listUrl, err) - return nil - } - - resourceList, ok := res["networks"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) - // Keep count of items that aren't sweepable for logging. - nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil + // List of location values includes: + // * global location + // * regions used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "global", "southamerica-west1", "me-west1"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", location) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": "-", + "billing_account": billingId, + }, } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { - nonPrefixCount++ - continue - } - - deleteTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - 
return nil + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "DELETE", + Method: "GET", Project: config.Project, - RawURL: deleteUrl, + RawURL: listUrl, UserAgent: config.UserAgent, }) if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) - } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue } - } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + resourceList, ok := res["vmwareEngineNetworks"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } } return nil diff --git a/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go b/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go new file mode 100644 index 00000000000..ee26c9ac01f --- /dev/null +++ b/google/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package vmwareengine + +import ( + "context" + "fmt" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VmwareenginePrivateCloud", testSweepVmwareenginePrivateCloud) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVmwareenginePrivateCloud(region string) error { + resourceName := "VmwareenginePrivateCloud" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // List of location values includes: + // * zones used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "southamerica-west1-a", "me-west1-a"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", location) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue + } + + resourceList, ok := res["privateClouds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // We force delete the Private Cloud and ensure there's no delay in deletion + force := true + delayHours := 0 + deleteUrl = deleteUrl + fmt.Sprintf("?force=%t&delayHours=%d", force, delayHours) + + // Don't wait on operations as we may have a lot to delete + _, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } + + return nil +} diff --git a/google/tpgresource/utils.go b/google/tpgresource/utils.go index 539abcde0e2..ab376408eb9 100644 --- a/google/tpgresource/utils.go +++ b/google/tpgresource/utils.go @@ -60,15 +60,14 @@ type TerraformResourceDiff interface { // Contains functions that don't really belong anywhere else. // GetRegionFromZone returns the region from a zone for Google cloud. -// This is by removing the last two chars from the zone name to leave the region -// If there aren't enough characters in the input string, an empty string is returned +// This is by removing the characters after the last '-'. // e.g. 
southamerica-west1-a => southamerica-west1 func GetRegionFromZone(zone string) string { - if zone != "" && len(zone) > 2 { - region := zone[:len(zone)-2] - return region + zoneParts := strings.Split(zone, "-") + if len(zoneParts) < 3 { + return "" } - return "" + return strings.Join(zoneParts[:len(zoneParts)-1], "-") } // Infers the region based on the following (in order of priority): diff --git a/website/docs/d/artifact_registry_docker_image.html.markdown b/website/docs/d/artifact_registry_docker_image.html.markdown index 4065bd03351..62bed15ebcd 100644 --- a/website/docs/d/artifact_registry_docker_image.html.markdown +++ b/website/docs/d/artifact_registry_docker_image.html.markdown @@ -21,9 +21,9 @@ resource "google_artifact_registry_repository" "my_repo" { } data "google_artifact_registry_docker_image" "my_image" { - repository = google_artifact_registry_repository.my_repo.id - image = "my-image" - tag = "my-tag" + location = google_artifact_registry_repository.my_repo.location + repository_id = google_artifact_registry_repository.my_repo.repository_id + image = "my-image:my-tag" } resource "google_cloud_run_v2_service" "default" { @@ -43,7 +43,7 @@ The following arguments are supported: * `location` - (Required) The location of the artifact registry. -* `repository_id` - (Required) The last part of the repository name. to fetch from. +* `repository_id` - (Required) The last part of the repository name to fetch from. * `image_name` - (Required) The image name to fetch. If no digest or tag is provided, then the latest modified image will be used. 
diff --git a/website/docs/guides/getting_started.html.markdown b/website/docs/guides/getting_started.html.markdown index 8ecb74a8521..346d72b6c7d 100644 --- a/website/docs/guides/getting_started.html.markdown +++ b/website/docs/guides/getting_started.html.markdown @@ -100,23 +100,23 @@ are used as a short way to identify resources, and a resource's display name in the Cloud Console will be the one defined in the `name` field. When linking resources in a Terraform config though, you'll primarily want to -use a different field, the `self_link` of a resource. Like `name`, nearly every -resource has a `self_link`. They look like: +use a different field, the `id` of a resource. Every Terraform resource has an +`id`. In the Google provider they generally look like: ``` -{{API base url}}/projects/{{your project}}/{{location type}}/{{location}}/{{resource type}}/{{name}} +projects/{{your project}}/{{location type}}/{{location}}/{{resource type}}/{{name}} ``` For example, the instance defined earlier in a project named `foo` will have -the `self_link`: +the `id`: ``` -https://www.googleapis.com/compute/v1/projects/foo/zones/us-central1-c/instances/terraform-instance +projects/foo/zones/us-central1-c/instances/terraform-instance ``` -A resource's `self_link` is a unique reference to that resource. When +A resource's `id` is a unique reference to that resource. When linking two resources in Terraform, you can use Terraform interpolation to -avoid typing out the self link! Let's use a `google_compute_network` to +avoid typing out the id! Let's use a `google_compute_network` to demonstrate. Add this block to your config: @@ -136,7 +136,7 @@ with a subnetwork in each region. 
Next, change the network of the network_interface { - # A default network is created for all GCP projects - network = "default" -+ network = google_compute_network.vpc_network.self_link ++ network = google_compute_network.vpc_network.id access_config { ``` @@ -211,7 +211,7 @@ resource "google_compute_instance" "vm_instance" { network_interface { # A default network is created for all GCP projects - network = google_compute_network.vpc_network.self_link + network = google_compute_network.vpc_network.id access_config { } } @@ -237,7 +237,7 @@ a virtual machine on Google Cloud Platform. The key concepts unique to GCP are: * and how to use a default `project` in your provider * What a resource being global, regional, or zonal means on GCP * and how to specify a default `region` and `zone` -* How GCP uses `name` and `self_link` to identify resources +* How GCP uses `name` and `id` to identify resources * How to add GCP service account credentials to Terraform Run `terraform destroy` to tear down your resources. diff --git a/website/docs/guides/version_6_upgrade.html.markdown b/website/docs/guides/version_6_upgrade.html.markdown index b0530361aeb..a32f7fa8fb5 100644 --- a/website/docs/guides/version_6_upgrade.html.markdown +++ b/website/docs/guides/version_6_upgrade.html.markdown @@ -1,7 +1,7 @@ --- -page_title: "Terraform Google Provider 6.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 6.0.0 Upgrade Guide" description: |- - Terraform Google Provider 6.0.0 Upgrade Guide + Terraform provider for Google Cloud 6.0.0 Upgrade Guide --- # Terraform Google Provider 6.0.0 Upgrade Guide diff --git a/website/docs/r/bigtable_gc_policy.html.markdown b/website/docs/r/bigtable_gc_policy.html.markdown index 135345f0891..b41494cd49a 100644 --- a/website/docs/r/bigtable_gc_policy.html.markdown +++ b/website/docs/r/bigtable_gc_policy.html.markdown @@ -166,6 +166,9 @@ The following arguments are supported: Possible values are: `ABANDON`. 
+* `ignore_warnings` - (Optional) Boolean for whether to allow ignoring warnings when updating the gc policy. + Setting this to `true` allows relaxing the gc policy for replicated clusters by up to 90 days, but keep in mind this may increase how long clusters are inconsistent. Make sure + you understand the risks listed at https://cloud.google.com/bigtable/docs/garbage-collection#increasing before setting this option. ----- `max_age` supports the following arguments: diff --git a/website/docs/r/cloudfunctions2_function.html.markdown b/website/docs/r/cloudfunctions2_function.html.markdown index c4926908baa..b0ae066915d 100644 --- a/website/docs/r/cloudfunctions2_function.html.markdown +++ b/website/docs/r/cloudfunctions2_function.html.markdown @@ -833,6 +833,160 @@ resource "google_cloudfunctions2_function" "function" { } ``` +## Example Usage - Cloudfunctions2 Abiu + + +```hcl +locals { + project = "my-project-name" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "gcf-sa" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "functions2-topic" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-gcf-source" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "function-source.zip" # Add path to the zipped function source code +} + +resource "google_cloudfunctions2_function" "function" { + provider = google-beta + name = "gcf-function" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { 
+ storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + automatic_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} +``` +## Example Usage - Cloudfunctions2 Abiu On Deploy + + +```hcl +locals { + project = "my-project-name" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "gcf-sa" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "functions2-topic" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-gcf-source" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "function-source.zip" # Add path to the zipped function source code +} + +resource "google_cloudfunctions2_function" "function" { + provider = google-beta + name = "gcf-function" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = 
google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + on_deploy_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} +``` ## Argument Reference @@ -930,6 +1084,16 @@ The following arguments are supported: (Optional) The fully-qualified name of the service account to be used for building the container. +* `automatic_update_policy` - + (Optional) + Security patches are applied automatically to the runtime without requiring + the function to be redeployed. + +* `on_deploy_update_policy` - + (Optional) + Security patches are only applied when a function is redeployed. + Structure is [documented below](#nested_on_deploy_update_policy). + The `source` block supports: @@ -991,6 +1155,12 @@ The following arguments are supported: Only trigger a build if the revision regex does NOT match the revision regex. +The `on_deploy_update_policy` block supports: + +* `runtime_version` - + (Output) + The runtime version which was used during latest function deployment. 
+ The `service_config` block supports: * `service` - diff --git a/website/docs/r/composer_environment.html.markdown b/website/docs/r/composer_environment.html.markdown index 330f9e76b80..31735da2ccc 100644 --- a/website/docs/r/composer_environment.html.markdown +++ b/website/docs/r/composer_environment.html.markdown @@ -47,6 +47,11 @@ will not be able to find or manage many of these underlying resources automatica resource "google_composer_environment" "test" { name = "example-composer-env" region = "us-central1" + config { + software_config { + image_version = "composer-1-airflow-2" + } + } } ``` @@ -415,14 +420,40 @@ The following arguments are supported: They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: ``` + AIRFLOW_DATABASE_VERSION AIRFLOW_HOME - C_FORCE_ROOT + AIRFLOW_SRC_DIR + AIRFLOW_WEBSERVER + AUTO_GKE + CLOUDSDK_METRICS_ENVIRONMENT + CLOUD_LOGGING_ONLY + COMPOSER_ENVIRONMENT + COMPOSER_GKE_LOCATION + COMPOSER_GKE_NAME + COMPOSER_GKE_ZONE + COMPOSER_LOCATION + COMPOSER_OPERATION_UUID + COMPOSER_PYTHON_VERSION + COMPOSER_VERSION CONTAINER_NAME + C_FORCE_ROOT DAGS_FOLDER GCP_PROJECT + GCP_TENANT_PROJECT + GCSFUSE_EXTRACTED GCS_BUCKET GKE_CLUSTER_NAME + GKE_IN_TENANT + GOOGLE_APPLICATION_CREDENTIALS + MAJOR_VERSION + MINOR_VERSION + PATH + PIP_DISABLE_PIP_VERSION_CHECK + PORT + PROJECT_ID + PYTHONPYCACHEPREFIX SQL_DATABASE + SQL_HOST SQL_INSTANCE SQL_PASSWORD SQL_PROJECT diff --git a/website/docs/r/compute_address.html.markdown b/website/docs/r/compute_address.html.markdown index ab2b60ce2b6..0f2ef5f20cf 100644 --- a/website/docs/r/compute_address.html.markdown +++ b/website/docs/r/compute_address.html.markdown @@ -207,7 +207,6 @@ The following arguments are supported: * PRIVATE_SERVICE_CONNECT for a private network address that is used to configure Private Service Connect. 
Only global internal addresses can use this purpose. - This should only be set when using an Internal address. * `network_tier` - diff --git a/website/docs/r/compute_backend_service.html.markdown b/website/docs/r/compute_backend_service.html.markdown index d0c3e124975..028c836d299 100644 --- a/website/docs/r/compute_backend_service.html.markdown +++ b/website/docs/r/compute_backend_service.html.markdown @@ -491,7 +491,6 @@ The following arguments are supported: instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and loadBalancingScheme set to INTERNAL_MANAGED. @@ -499,7 +498,6 @@ The following arguments are supported: * A regional backend service with loadBalancingScheme set to EXTERNAL (External Network Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced diff --git a/website/docs/r/compute_forwarding_rule.html.markdown b/website/docs/r/compute_forwarding_rule.html.markdown index 46bae687bd3..8cdff312e91 100644 --- a/website/docs/r/compute_forwarding_rule.html.markdown +++ b/website/docs/r/compute_forwarding_rule.html.markdown @@ -1352,7 +1352,6 @@ The following arguments are supported: `IPAddress` should be set to `0.0.0.0`. * When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. 
@@ -1367,7 +1366,6 @@ The following arguments are supported: * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` - The forwarding rule's `target` or `backendService`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see @@ -1480,7 +1478,6 @@ The following arguments are supported: * For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. * `allow_global_access` - diff --git a/website/docs/r/compute_global_forwarding_rule.html.markdown b/website/docs/r/compute_global_forwarding_rule.html.markdown index cdafca00770..abfe030fd81 100644 --- a/website/docs/r/compute_global_forwarding_rule.html.markdown +++ b/website/docs/r/compute_global_forwarding_rule.html.markdown @@ -1214,7 +1214,6 @@ The following arguments are supported: * For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. @@ -1238,7 +1237,6 @@ The following arguments are supported: `IPAddress` should be set to `0.0.0.0`. 
* When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. @@ -1253,7 +1251,6 @@ The following arguments are supported: * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` - The forwarding rule's `target`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see diff --git a/website/docs/r/compute_http_health_check.html.markdown b/website/docs/r/compute_http_health_check.html.markdown index 87db1f3eace..3ca3fbed655 100644 --- a/website/docs/r/compute_http_health_check.html.markdown +++ b/website/docs/r/compute_http_health_check.html.markdown @@ -22,7 +22,6 @@ description: |- An HttpHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTP. - ~> **Note:** google_compute_http_health_check is a legacy health check. The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/website/docs/r/compute_https_health_check.html.markdown b/website/docs/r/compute_https_health_check.html.markdown index 10657377f21..3d478505f35 100644 --- a/website/docs/r/compute_https_health_check.html.markdown +++ b/website/docs/r/compute_https_health_check.html.markdown @@ -22,7 +22,6 @@ description: |- An HttpsHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTPS. - ~> **Note:** google_compute_https_health_check is a legacy health check. 
The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/website/docs/r/compute_managed_ssl_certificate.html.markdown b/website/docs/r/compute_managed_ssl_certificate.html.markdown index 54d690282c4..e0007405800 100644 --- a/website/docs/r/compute_managed_ssl_certificate.html.markdown +++ b/website/docs/r/compute_managed_ssl_certificate.html.markdown @@ -212,7 +212,6 @@ The following arguments are supported: first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. * `managed` - diff --git a/website/docs/r/compute_network_attachment.html.markdown b/website/docs/r/compute_network_attachment.html.markdown index 23555222aff..61c35407485 100644 --- a/website/docs/r/compute_network_attachment.html.markdown +++ b/website/docs/r/compute_network_attachment.html.markdown @@ -21,8 +21,6 @@ description: |- A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
To get more information about NetworkAttachment, see: @@ -35,7 +33,6 @@ To get more information about NetworkAttachment, see: ```hcl resource "google_compute_network_attachment" "default" { - provider = google-beta name = "basic-network-attachment" region = "us-central1" description = "basic network attachment description" @@ -55,13 +52,11 @@ resource "google_compute_network_attachment" "default" { } resource "google_compute_network" "default" { - provider = google-beta name = "basic-network" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "basic-subnetwork" region = "us-central1" @@ -70,7 +65,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_project" "rejected_producer_project" { - provider = google-beta project_id = "prj-rejected" name = "prj-rejected" org_id = "123456789" @@ -78,7 +72,6 @@ resource "google_project" "rejected_producer_project" { } resource "google_project" "accepted_producer_project" { - provider = google-beta project_id = "prj-accepted" name = "prj-accepted" org_id = "123456789" @@ -95,13 +88,11 @@ resource "google_project" "accepted_producer_project" { ```hcl resource "google_compute_network" "default" { - provider = google-beta name = "basic-network" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "basic-subnetwork" region = "us-central1" @@ -110,7 +101,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_compute_network_attachment" "default" { - provider = google-beta name = "basic-network-attachment" region = "us-central1" description = "my basic network attachment" @@ -120,7 +110,6 @@ resource "google_compute_network_attachment" "default" { } resource "google_compute_instance" "default" { - provider = google-beta name = "basic-instance" zone = "us-central1-a" machine_type = "e2-micro" diff --git a/website/docs/r/compute_region_backend_service.html.markdown 
b/website/docs/r/compute_region_backend_service.html.markdown index f669d11ebe3..de576b40df2 100644 --- a/website/docs/r/compute_region_backend_service.html.markdown +++ b/website/docs/r/compute_region_backend_service.html.markdown @@ -494,7 +494,6 @@ The following arguments are supported: instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and loadBalancingScheme set to INTERNAL_MANAGED. @@ -502,7 +501,6 @@ The following arguments are supported: * A regional backend service with loadBalancingScheme set to EXTERNAL (External Network Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced diff --git a/website/docs/r/compute_region_ssl_certificate.html.markdown b/website/docs/r/compute_region_ssl_certificate.html.markdown index 544d9eb18db..65210568883 100644 --- a/website/docs/r/compute_region_ssl_certificate.html.markdown +++ b/website/docs/r/compute_region_ssl_certificate.html.markdown @@ -203,7 +203,6 @@ The following arguments are supported: first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. 
* `region` - diff --git a/website/docs/r/compute_ssl_certificate.html.markdown b/website/docs/r/compute_ssl_certificate.html.markdown index fd6a946cbdf..db3205a65ee 100644 --- a/website/docs/r/compute_ssl_certificate.html.markdown +++ b/website/docs/r/compute_ssl_certificate.html.markdown @@ -195,7 +195,6 @@ The following arguments are supported: first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. * `project` - (Optional) The ID of the project in which the resource belongs. diff --git a/website/docs/r/compute_target_https_proxy.html.markdown b/website/docs/r/compute_target_https_proxy.html.markdown index c6191140996..b3564262a99 100644 --- a/website/docs/r/compute_target_https_proxy.html.markdown +++ b/website/docs/r/compute_target_https_proxy.html.markdown @@ -344,6 +344,14 @@ The following arguments are supported: Default value is `NONE`. Possible values are: `NONE`, `ENABLE`, `DISABLE`. +* `tls_early_data` - + (Optional) + Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. + Early Data allows a TLS resumption handshake to include the initial application payload + (a HTTP request) alongside the handshake, reducing the effective round trips to “zero”. + This applies to TLS 1.3 connections over TCP (HTTP/2) as well as over UDP (QUIC/h3). + Possible values are: `STRICT`, `PERMISSIVE`, `DISABLED`. + * `certificate_manager_certificates` - (Optional) URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. 
diff --git a/website/docs/r/compute_url_map.html.markdown b/website/docs/r/compute_url_map.html.markdown index ec9a1ae6ac3..32af5d6fa88 100644 --- a/website/docs/r/compute_url_map.html.markdown +++ b/website/docs/r/compute_url_map.html.markdown @@ -718,6 +718,102 @@ resource "google_storage_bucket" "static" { location = "US" } ``` + +## Example Usage - Url Map Custom Error Response Policy + + +```hcl +resource "google_compute_url_map" "urlmap" { + provider = google-beta + name = "urlmap" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 5xx responses will be catched + path = "/*" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx", "5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/login" + override_response_code = 404 + } + error_response_rule { + match_response_codes = ["503"] # Only a 503 response will be catched on path example + path = "/example" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx"] + path = "/register" + override_response_code = 401 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = google-beta + name = "login" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = 
[google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "health-check" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = "error-backend-bucket" + bucket_name = google_storage_bucket.error.name + enable_cdn = true +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "static-asset-bucket" + location = "US" +} +``` ## Argument Reference @@ -763,6 +859,18 @@ The following arguments are supported: The list of named PathMatchers to use against the URL. Structure is [documented below](#nested_path_matcher). +* `default_custom_error_response_policy` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. + For example, consider a UrlMap with the following configuration: + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. 
If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + Structure is [documented below](#nested_default_custom_error_response_policy). + * `test` - (Optional) The list of expected URL mapping tests. Request to update this UrlMap will @@ -875,6 +983,18 @@ The following arguments are supported: An optional description of this resource. Provide this property when you create the resource. +* `default_custom_error_response_policy` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. + For example, consider a UrlMap with the following configuration: + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. 
+ If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + Structure is [documented below](#nested_default_custom_error_response_policy). + * `header_action` - (Optional) Specifies changes to request and response headers that need to take effect for @@ -923,6 +1043,48 @@ The following arguments are supported: Structure is [documented below](#nested_default_route_action). +The `default_custom_error_response_policy` block supports: + +* `error_response_rule` - + (Optional) + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. 
+ Structure is [documented below](#nested_error_response_rule). + +* `error_service` - + (Optional) + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). + + +The `error_response_rule` block supports: + +* `match_response_codes` - + (Optional) + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + +* `path` - + (Optional) + The full path to a file within backendBucket. For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client.
+ The value must be from 1 to 1024 characters. + +* `override_response_code` - + (Optional) + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + The `header_action` block supports: * `request_headers_to_add` - @@ -992,6 +1154,17 @@ The following arguments are supported: does not include any text after the first ? or #, and those chars are not allowed here. +* `custom_error_response_policy` - + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) + customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendService or BackendBucket responds with an error. + If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. + For example, consider a UrlMap with the following configuration: + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A PathRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in PathRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect.
+ customErrorResponsePolicy is supported only for global external Application Load Balancers. + Structure is [documented below](#nested_custom_error_response_policy). + * `route_action` - (Optional) In response to a matching path, the load balancer performs advanced routing @@ -1010,6 +1183,48 @@ The following arguments are supported: Structure is [documented below](#nested_url_redirect). +The `custom_error_response_policy` block supports: + +* `error_response_rule` - + (Optional) + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + Structure is [documented below](#nested_error_response_rule). + +* `error_service` - + (Optional) + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). 
+ + +The `error_response_rule` block supports: + +* `match_response_codes` - + (Optional) + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + +* `path` - + (Optional) + The full path to a file within backendBucket. For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters. + +* `override_response_code` - + (Optional) + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + The `route_action` block supports: * `cors_policy` - @@ -2407,6 +2622,48 @@ The following arguments are supported: The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. The value must be between 0.0 and 100.0 inclusive. +The `default_custom_error_response_policy` block supports: + +* `error_response_rule` - + (Optional) + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority.
+ For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + Structure is [documented below](#nested_error_response_rule). + +* `error_service` - + (Optional) + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). + + +The `error_response_rule` block supports: + +* `match_response_codes` - + (Optional) + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + +* `path` - + (Optional) + The full path to a file within backendBucket. For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. 
+ If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters. + +* `override_response_code` - + (Optional) + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + The `test` block supports: * `description` - diff --git a/website/docs/r/container_aws_cluster.html.markdown b/website/docs/r/container_aws_cluster.html.markdown index 757073abfcb..c9df4e7855e 100644 --- a/website/docs/r/container_aws_cluster.html.markdown +++ b/website/docs/r/container_aws_cluster.html.markdown @@ -23,7 +23,7 @@ description: |- An Anthos cluster running on AWS. For more information, see: -* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) ## Example Usage - basic_aws_cluster A basic example of a containeraws cluster ```hcl diff --git a/website/docs/r/container_aws_node_pool.html.markdown b/website/docs/r/container_aws_node_pool.html.markdown index ed1540860fa..ece2f7fbe8f 100644 --- a/website/docs/r/container_aws_node_pool.html.markdown +++ b/website/docs/r/container_aws_node_pool.html.markdown @@ -23,7 +23,7 @@ description: |- An Anthos node pool running on AWS. 
For more information, see: -* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) ## Example Usage - basic_aws_cluster A basic example of a containeraws node pool ```hcl diff --git a/website/docs/r/container_azure_client.html.markdown b/website/docs/r/container_azure_client.html.markdown index 25e70e9b222..06101bdc054 100644 --- a/website/docs/r/container_azure_client.html.markdown +++ b/website/docs/r/container_azure_client.html.markdown @@ -23,7 +23,7 @@ description: |- AzureClient resources hold client authentication information needed by the Anthos Multi-Cloud API to manage Azure resources on your Azure subscription.When an AzureCluster is created, an AzureClient resource needs to be provided and all operations on Azure resources associated to that cluster will authenticate to Azure services using the given client.AzureClient resources are immutable and cannot be modified upon creation.Each AzureClient resource is bound to a single Azure Active Directory Application and tenant. For more information, see: -* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) ## Example Usage - basic_azure_client A basic example of a containerazure azure client ```hcl diff --git a/website/docs/r/container_azure_cluster.html.markdown b/website/docs/r/container_azure_cluster.html.markdown index 7b9f2461c6e..6ebe99359d4 100644 --- a/website/docs/r/container_azure_cluster.html.markdown +++ b/website/docs/r/container_azure_cluster.html.markdown @@ -23,7 +23,7 @@ description: |- An Anthos cluster running on Azure. 
For more information, see: -* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) ## Example Usage - basic_azure_cluster A basic example of a containerazure azure cluster ```hcl diff --git a/website/docs/r/container_azure_node_pool.html.markdown b/website/docs/r/container_azure_node_pool.html.markdown index 8cd9f7a0330..3e96fc730d7 100644 --- a/website/docs/r/container_azure_node_pool.html.markdown +++ b/website/docs/r/container_azure_node_pool.html.markdown @@ -23,7 +23,7 @@ description: |- An Anthos node pool running on Azure. For more information, see: -* [Multicloud overview](https://cloud.google.com/anthos/clusters/docs/multi-cloud) +* [Multicloud overview](https://cloud.google.com/kubernetes-engine/multi-cloud/docs) ## Example Usage - basic_azure_node_pool A basic example of a containerazure azure node pool ```hcl diff --git a/website/docs/r/data_fusion_instance.html.markdown b/website/docs/r/data_fusion_instance.html.markdown index c96c88d7737..e388f67d352 100644 --- a/website/docs/r/data_fusion_instance.html.markdown +++ b/website/docs/r/data_fusion_instance.html.markdown @@ -95,6 +95,55 @@ resource "google_compute_global_address" "private_ip_alloc" { network = google_compute_network.network.id } ``` + +## Example Usage - Data Fusion Instance Psc + + +```hcl +resource "google_data_fusion_instance" "psc_instance" { + name = "psc-instance" + region = "us-central1" + type = "BASIC" + private_instance = true + + network_config { + connection_type = "PRIVATE_SERVICE_CONNECT_INTERFACES" + private_service_connect_config { + network_attachment = google_compute_network_attachment.psc.id + unreachable_cidr_block = "192.168.0.0/25" + } + } + + +} + +resource "google_compute_network" "psc" { + name = "datafusion-psc-network" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc" { + name = "datafusion-psc-subnet" + region = 
"us-central1" + + network = google_compute_network.psc.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "psc" { + name = "datafusion-psc-attachment" + region = "us-central1" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.psc.self_link + ] +} +```
Open in Cloud Shell @@ -303,16 +352,50 @@ The following arguments are supported: The `network_config` block supports: * `ip_allocation` - - (Required) + (Optional) The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. * `network` - - (Required) + (Optional) Name of the network in the project with which the tenant project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network} +* `connection_type` - + (Optional) + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and + the corresponding tenant project from a predefined list of available connection modes. + If this field is unspecified for a private instance, VPC peering is used. + Possible values are: `VPC_PEERING`, `PRIVATE_SERVICE_CONNECT_INTERFACES`. + +* `private_service_connect_config` - + (Optional) + Optional. Configuration for Private Service Connect. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + Structure is [documented below](#nested_private_service_connect_config). + + +The `private_service_connect_config` block supports: + +* `network_attachment` - + (Optional) + Optional. The reference to the network attachment used to establish private connectivity. + It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + +* `unreachable_cidr_block` - + (Optional) + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block should be at least /25. 
This range should not overlap with the primary address range of any subnetwork used by the network attachment. + This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. + If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. + +* `effective_unreachable_cidr_block` - + (Output) + Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block is /25. The format of this field is governed by RFC 4632. + The `crypto_key_config` block supports: * `key_reference` - diff --git a/website/docs/r/healthcare_dataset.html.markdown b/website/docs/r/healthcare_dataset.html.markdown index 3aa53d3ad90..b21ddf7f86f 100644 --- a/website/docs/r/healthcare_dataset.html.markdown +++ b/website/docs/r/healthcare_dataset.html.markdown @@ -43,6 +43,50 @@ resource "google_healthcare_dataset" "default" { time_zone = "UTC" } ``` + +## Example Usage - Healthcare Dataset Cmek + + +```hcl +data "google_project" "project" {} + +resource "google_healthcare_dataset" "default" { + name = "example-dataset" + location = "us-central1" + time_zone = "UTC" + + encryption_spec { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + google_kms_crypto_key_iam_binding.healthcare_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "example-key" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "example-keyring" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_binding" "healthcare_cmek_keyuser" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + 
] +} +``` ## Argument Reference @@ -67,10 +111,24 @@ The following arguments are supported: "America/New_York" or empty, which defaults to UTC. This is used for parsing times in resources (e.g., HL7 messages) where no explicit timezone is specified. +* `encryption_spec` - + (Optional) + A nested object resource + Structure is [documented below](#nested_encryption_spec). + * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. +The `encryption_spec` block supports: + +* `kms_key_name` - + (Optional) + KMS encryption key that is used to secure this dataset and its sub-resources. The key used for + encryption and the dataset must be in the same location. If empty, the default Google encryption + key will be used to secure this dataset. The format is + projects/{projectId}/locations/{locationId}/keyRings/{keyRingId}/cryptoKeys/{keyId}. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/vertex_ai_feature_group.html.markdown b/website/docs/r/vertex_ai_feature_group.html.markdown index 01e4179e8d4..adcd0bbf2ec 100644 --- a/website/docs/r/vertex_ai_feature_group.html.markdown +++ b/website/docs/r/vertex_ai_feature_group.html.markdown @@ -127,7 +127,7 @@ The following arguments are supported: * `entity_id_columns` - (Optional) - Columns to construct entityId / row keys. Currently only supports 1 entity_id_column. If not provided defaults to entityId. + Columns to construct entityId / row keys. If not provided defaults to entityId. 
The `big_query_source` block supports: diff --git a/website/docs/r/vertex_ai_feature_online_store.html.markdown b/website/docs/r/vertex_ai_feature_online_store.html.markdown index 4681bf0218a..301f3556ed2 100644 --- a/website/docs/r/vertex_ai_feature_online_store.html.markdown +++ b/website/docs/r/vertex_ai_feature_online_store.html.markdown @@ -53,16 +53,16 @@ resource "google_vertex_ai_feature_online_store" "feature_online_store" { } ``` -## Example Usage - Vertex Ai Featureonlinestore With Beta Fields Optimized +## Example Usage - Vertex Ai Featureonlinestore With Optimized ```hcl resource "google_vertex_ai_feature_online_store" "featureonlinestore" { - provider = google-beta + provider = google name = "example_feature_online_store_optimized" labels = { foo = "bar" @@ -78,7 +78,7 @@ resource "google_vertex_ai_feature_online_store" "featureonlinestore" { } data "google_project" "project" { - provider = google-beta + provider = google } ```
@@ -144,8 +144,8 @@ The following arguments are supported: Settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore * `dedicated_serving_endpoint` - - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) - The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. + (Optional) + The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to be set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Structure is [documented below](#nested_dedicated_serving_endpoint). * `embedding_management` -