Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Promote GKE database encryption to GA #6701

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/3699.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
container: Promoted `google_container_cluster` `database_encryption` to GA.
```
8 changes: 4 additions & 4 deletions google/resource_bigquery_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -336,13 +336,13 @@ func resourceBigQueryTable() *schema.Resource {
Description: `Number of milliseconds for which to keep the storage for a partition.`,
},

// Type: [Required] The supported types are DAY and HOUR, which will generate
// one partition per day or hour based on data loading time.
// Type: [Required] The only type supported is DAY, which will generate
// one partition per day based on data loading time.
"type": {
Type: schema.TypeString,
Required: true,
Description: `The supported types are DAY and HOUR, which will generate one partition per day or hour based on data loading time.`,
ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR"}, false),
Description: `The only type supported is DAY, which will generate one partition per day based on data loading time.`,
ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
},

// Field: [Optional] The field used to determine how to create a time-based
Expand Down
92 changes: 2 additions & 90 deletions google/resource_bigquery_table_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ func TestAccBigQueryTable_Basic(t *testing.T) {
CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccBigQueryTableDailyTimePartitioning(datasetID, tableID),
Config: testAccBigQueryTable(datasetID, tableID),
},
{
ResourceName: "google_bigquery_table.test",
Expand Down Expand Up @@ -64,37 +64,6 @@ func TestAccBigQueryTable_Kms(t *testing.T) {
})
}

// TestAccBigQueryTable_HourlyTimePartitioning creates a table with HOUR
// time partitioning, verifies it imports cleanly, applies an updated
// config, and verifies the import again.
func TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) {
	t.Parallel()

	dataset := fmt.Sprintf("tf_test_%s", randString(t, 10))
	table := fmt.Sprintf("tf_test_%s", randString(t, 10))

	// Create-then-update, importing and verifying state after each apply.
	steps := []resource.TestStep{
		{
			Config: testAccBigQueryTableHourlyTimePartitioning(dataset, table),
		},
		{
			ResourceName:      "google_bigquery_table.test",
			ImportState:       true,
			ImportStateVerify: true,
		},
		{
			Config: testAccBigQueryTableUpdated(dataset, table),
		},
		{
			ResourceName:      "google_bigquery_table.test",
			ImportState:       true,
			ImportStateVerify: true,
		},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
		Steps:        steps,
	})
}

func TestAccBigQueryTable_HivePartitioning(t *testing.T) {
t.Parallel()
bucketName := testBucketName(t)
Expand Down Expand Up @@ -292,7 +261,7 @@ func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.St
}
}

func testAccBigQueryTableDailyTimePartitioning(datasetID, tableID string) string {
func testAccBigQueryTable(datasetID, tableID string) string {
return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
dataset_id = "%s"
Expand Down Expand Up @@ -349,63 +318,6 @@ EOH
`, datasetID, tableID)
}

// testAccBigQueryTableHourlyTimePartitioning returns a Terraform config
// declaring a dataset and a table partitioned hourly on the "ts" TIMESTAMP
// field, with require_partition_filter enabled, clustering on two columns,
// and a nested-RECORD schema.
func testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID string) string {
	// The two %s verbs are filled, in order, with datasetID and tableID.
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
table_id = "%s"
dataset_id = google_bigquery_dataset.test.dataset_id

time_partitioning {
type = "HOUR"
field = "ts"
require_partition_filter = true
}
clustering = ["some_int", "some_string"]
schema = <<EOH
[
{
"name": "ts",
"type": "TIMESTAMP"
},
{
"name": "some_string",
"type": "STRING"
},
{
"name": "some_int",
"type": "INTEGER"
},
{
"name": "city",
"type": "RECORD",
"fields": [
{
"name": "id",
"type": "INTEGER"
},
{
"name": "coord",
"type": "RECORD",
"fields": [
{
"name": "lon",
"type": "FLOAT"
}
]
}
]
}
]
EOH

}
`, datasetID, tableID)
}

func testAccBigQueryTableKms(cryptoKeyName, datasetID, tableID string) string {
return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
Expand Down
58 changes: 58 additions & 0 deletions google/resource_container_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -879,6 +879,32 @@ func resourceContainerCluster() *schema.Resource {
},
},

"database_encryption": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ForceNew: true,
Computed: true,
Description: `Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"state": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"ENCRYPTED", "DECRYPTED"}, false),
Description: `ENCRYPTED or DECRYPTED.`,
},
"key_name": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Description: `The key to use to encrypt/decrypt secrets.`,
},
},
},
},

"resource_usage_export_config": {
Type: schema.TypeList,
MaxItems: 1,
Expand Down Expand Up @@ -1112,6 +1138,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v)
}

if v, ok := d.GetOk("database_encryption"); ok {
cluster.DatabaseEncryption = expandDatabaseEncryption(v)
}

if v, ok := d.GetOk("workload_identity_config"); ok {
cluster.WorkloadIdentityConfig = expandWorkloadIdentityConfig(v)
}
Expand Down Expand Up @@ -1323,6 +1353,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
return err
}

if err := d.Set("database_encryption", flattenDatabaseEncryption(cluster.DatabaseEncryption)); err != nil {
return err
}

d.Set("resource_labels", cluster.ResourceLabels)
d.Set("label_fingerprint", cluster.LabelFingerprint)

Expand Down Expand Up @@ -2353,6 +2387,18 @@ func expandVerticalPodAutoscaling(configured interface{}) *containerBeta.Vertica
}
}

// expandDatabaseEncryption converts the "database_encryption" schema list
// into the API's DatabaseEncryption message. Returns nil when the block is
// absent or empty so the API default applies.
func expandDatabaseEncryption(configured interface{}) *containerBeta.DatabaseEncryption {
	l := configured.([]interface{})
	// Guard l[0] == nil as well as the empty list: an empty
	// `database_encryption {}` block is represented as a one-element list
	// holding nil, and the map type assertion below would panic on it.
	// This matches the sibling expandWorkloadIdentityConfig.
	if len(l) == 0 || l[0] == nil {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &containerBeta.DatabaseEncryption{
		State:   config["state"].(string),
		KeyName: config["key_name"].(string),
	}
}

func expandWorkloadIdentityConfig(configured interface{}) *containerBeta.WorkloadIdentityConfig {
l := configured.([]interface{})
if len(l) == 0 || l[0] == nil {
Expand Down Expand Up @@ -2676,6 +2722,18 @@ func flattenResourceUsageExportConfig(c *containerBeta.ResourceUsageExportConfig
}
}

// flattenDatabaseEncryption converts the API's DatabaseEncryption message
// back into the single-element schema list form, or nil when unset.
func flattenDatabaseEncryption(c *containerBeta.DatabaseEncryption) []map[string]interface{} {
	if c == nil {
		return nil
	}
	flattened := map[string]interface{}{
		"state":    c.State,
		"key_name": c.KeyName,
	}
	return []map[string]interface{}{flattened}
}

func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)

Expand Down
64 changes: 63 additions & 1 deletion google/resource_container_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1283,6 +1283,35 @@ func TestAccContainerCluster_errorNoClusterCreated(t *testing.T) {
})
}

// TestAccContainerCluster_withDatabaseEncryption verifies that a cluster
// configured with application-layer secrets encryption can be created and
// then imported with its state intact.
func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) {
	t.Parallel()

	name := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

	// Reuse the bootstrapped KMS key instead of creating a fresh one: KMS
	// keys cannot be fully deleted and would otherwise pile up in the
	// project. The key must live in the same location as the cluster, as
	// GKE does not accept keys in the "global" location.
	// See https://cloud.google.com/kubernetes-engine/docs/how-to/encrypting-secrets#creating_a_key
	kmsData := BootstrapKMSKeyInLocation(t, "us-central1")

	steps := []resource.TestStep{
		{
			Config: testAccContainerCluster_withDatabaseEncryption(name, kmsData),
		},
		{
			ResourceName:      "google_container_cluster.with_database_encryption",
			ImportState:       true,
			ImportStateVerify: true,
		},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps:        steps,
	})
}

func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -1620,7 +1649,7 @@ func testAccContainerCluster_updateAddons(projectID string, clusterName string)
return fmt.Sprintf(`
data "google_project" "project" {
project_id = "%s"
}
}

resource "google_container_cluster" "primary" {
name = "%s"
Expand Down Expand Up @@ -2882,6 +2911,39 @@ resource "google_container_cluster" "with_resource_labels" {
`, location)
}

// testAccContainerCluster_withDatabaseEncryption returns a Terraform config
// declaring a GKE cluster with database_encryption enabled, plus the KMS IAM
// policy granting the GKE service agent encrypt/decrypt on the bootstrapped
// key ring.
func testAccContainerCluster_withDatabaseEncryption(clusterName string, kmsData bootstrappedKMS) string {
	// Indexed verbs: %[1]s = key ring name, %[2]s = crypto key name,
	// %[3]s = cluster name.
	return fmt.Sprintf(`
data "google_project" "project" {
}

data "google_iam_policy" "test_kms_binding" {
binding {
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"

members = [
"serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com",
]
}
}

resource "google_kms_key_ring_iam_policy" "test_key_ring_iam_policy" {
key_ring_id = "%[1]s"
policy_data = data.google_iam_policy.test_kms_binding.policy_data
}

resource "google_container_cluster" "with_database_encryption" {
name = "%[3]s"
location = "us-central1-a"
initial_node_count = 1

database_encryption {
state = "ENCRYPTED"
key_name = "%[2]s"
}
}
`, kmsData.KeyRing.Name, kmsData.CryptoKey.Name, clusterName)
}

func testAccContainerCluster_withMasterAuthorizedNetworksDisabled(containerNetName string, clusterName string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
Expand Down
2 changes: 1 addition & 1 deletion website/docs/r/container_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ on the current needs of the cluster's workload. See the
[guide to using Node Auto-Provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning)
for more details. Structure is documented below.

* `database_encryption` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)).
* `database_encryption` - (Optional)
Structure is documented below.

* `description` - (Optional) Description of the cluster.
Expand Down