Skip to content

Commit

Permalink
Add a logging_variant field to GKE node pools and to node pool defa…
Browse files Browse the repository at this point in the history
…ults for GKE clusters. (GoogleCloudPlatform#6744)

This PR implements the feature request from [Add GKE logging variant field for increasing log agent throughput #12667](hashicorp/terraform-provider-google#12667).

By adding a logging_variant field within the node_pool_defaults, GKE users will be able to select a cluster-wide default value for the logging agent of the node pools in a cluster. For example, by specifying
```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "MAX_THROUGHPUT"
    }
  }
}
```
every newly created node pool in the cluster will have the max throughput logging agent unless this is explicitly overridden at the node pool level (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information).

GKE users will also be able to select a logging variant at the node pool level. For example, by specifying
```terraform
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name               = "example-cluster"
  location           = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "DEFAULT"
    }
  }
}
resource "google_container_node_pool" "with_default_logging_variant" {
  name    = "example-node-pool-0"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name
}
resource "google_container_node_pool" "with_high_throughput_logging_variant" {
  name    = "example-node-pool-1"
  cluster = google_container_cluster.with_logging_variant_node_pool_default.name
  node_config {
    logging_variant = "MAX_THROUGHPUT"
  }
}
```
example-node-pool-0 (as well as the default node pool) will have the default logging agent (see the [GKE docs](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#high_throughput_for_all_nodes_in_a_cluster) for more information), but example-node-pool-1 will have the max throughput agent.
  • Loading branch information
giuliano-sider authored and hao-nan-li committed Dec 6, 2022
1 parent 7cdf98f commit 8ed44da
Show file tree
Hide file tree
Showing 6 changed files with 307 additions and 25 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,6 @@ func clusterSchemaNodeConfig() *schema.Schema {
return nodeConfigSch
}

<% unless version == 'ga' -%>
// Defines default node pool settings for the entire cluster. These settings are
// overridden if specified on the specific NodePool object.
func clusterSchemaNodePoolDefaults() *schema.Schema {
Expand All @@ -118,16 +117,18 @@ func clusterSchemaNodePoolDefaults() *schema.Schema {
Description: `Subset of NodeConfig message that has defaults.`,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"gcfs_config": schemaGcfsConfig(false),
},
Schema: map[string]*schema.Schema{
<% unless version == 'ga' -%>
"gcfs_config": schemaGcfsConfig(false),
<% end -%>
"logging_variant": schemaLoggingVariant(),
},
},
},
},
},
}
}
<% end -%>

func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool {
// This diff gets applied in the cloud console if you specify
Expand Down Expand Up @@ -1164,9 +1165,9 @@ func resourceContainerCluster() *schema.Resource {
ConflictsWith: []string{"enable_autopilot"},
},

<% unless version == "ga" -%>
"node_pool_defaults": clusterSchemaNodePoolDefaults(),

<% unless version == "ga" -%>
"node_pool_auto_config": {
Type: schema.TypeList,
Optional: true,
Expand Down Expand Up @@ -1917,11 +1918,9 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.NodeConfig = expandNodeConfig([]interface{}{})
}

<% unless version == 'ga' -%>
if v, ok := d.GetOk("node_pool_defaults"); ok {
cluster.NodePoolDefaults = expandNodePoolDefaults(v)
}
<% end -%>

if v, ok := d.GetOk("node_config"); ok {
cluster.NodeConfig = expandNodeConfig(v)
Expand Down Expand Up @@ -2356,11 +2355,9 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
}
<% end -%>

<% unless version == 'ga' -%>
if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil {
return err
}
<% end -%>

return nil
}
Expand Down Expand Up @@ -3317,6 +3314,29 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())
}

if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") {
if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok {
loggingVariant := v.(string)
req := &container.UpdateClusterRequest{
Update: &container.ClusterUpdate{
DesiredNodePoolLoggingConfig: &container.NodePoolLoggingConfig{
VariantConfig: &container.LoggingVariantConfig{
Variant: loggingVariant,
},
},
},
}

updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.")
// Call update serially.
if err := lockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] GKE cluster %s node pool logging configuration defaults have been updated", d.Id())
}
}

<% unless version == 'ga' -%>
if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.gcfs_config") {
if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.gcfs_config"); ok {
Expand Down Expand Up @@ -4344,7 +4364,6 @@ func expandContainerClusterAuthenticatorGroupsConfig(configured interface{}) *co
}
}

<% unless version == 'ga' -%>
func expandNodePoolDefaults(configured interface{}) *container.NodePoolDefaults {
l, ok := configured.([]interface{})
if !ok || l == nil || len(l) == 0 || l[0] == nil {
Expand All @@ -4364,13 +4383,12 @@ func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interfa
}

result := make(map[string]interface{})
if c.NodeConfigDefaults != nil && c.NodeConfigDefaults.GcfsConfig != nil {
result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults)
if c.NodeConfigDefaults != nil {
result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults)
}

return []map[string]interface{}{result}
}
<% end -%>

<% unless version == 'ga' -%>
func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoConfig {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1177,6 +1177,45 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
}

if d.HasChange(prefix + "node_config") {

if d.HasChange(prefix + "node_config.0.logging_variant") {
if v, ok := d.GetOk(prefix + "node_config.0.logging_variant"); ok {
loggingVariant := v.(string)
req := &container.UpdateNodePoolRequest{
Name: name,
LoggingConfig: &container.NodePoolLoggingConfig{
VariantConfig: &container.LoggingVariantConfig{
Variant: loggingVariant,
},
},
}

updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()
if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool logging_variant", userAgent,
timeout)
}

// Call update serially.
if err := lockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated logging_variant for node pool %s", name)
}
}

if d.HasChange(prefix + "node_config.0.tags") {
req := &container.UpdateNodePoolRequest{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1001,6 +1001,83 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
})
}

// TestAccContainerCluster_withLoggingVariantInNodeConfig provisions a cluster
// whose default node pool requests the MAX_THROUGHPUT logging agent via
// node_config, then checks that the resource imports cleanly.
func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) {
	t.Parallel()

	name := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			// Apply the config with the high-throughput logging agent.
			{
				Config: testAccContainerCluster_withLoggingVariantInNodeConfig(name, "MAX_THROUGHPUT"),
			},
			// Import the cluster and verify state round-trips.
			{
				ResourceName:      "google_container_cluster.with_logging_variant_in_node_config",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccContainerCluster_withLoggingVariantInNodePool provisions a cluster
// containing a node pool that sets logging_variant = MAX_THROUGHPUT at the
// pool level, then checks that the resource imports cleanly.
func TestAccContainerCluster_withLoggingVariantInNodePool(t *testing.T) {
	t.Parallel()

	cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
	pool := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			// Apply a cluster with an inline node pool using MAX_THROUGHPUT.
			{
				Config: testAccContainerCluster_withLoggingVariantInNodePool(cluster, pool, "MAX_THROUGHPUT"),
			},
			// Import the cluster and verify state round-trips.
			{
				ResourceName:      "google_container_cluster.with_logging_variant_in_node_pool",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccContainerCluster_withLoggingVariantUpdates cycles the cluster-wide
// node pool logging-variant default through DEFAULT -> MAX_THROUGHPUT ->
// DEFAULT, importing and verifying the resource after each apply.
func TestAccContainerCluster_withLoggingVariantUpdates(t *testing.T) {
	t.Parallel()

	clusterName := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))

	// Each variant apply is followed by an import-and-verify step.
	var steps []resource.TestStep
	for _, variant := range []string{"DEFAULT", "MAX_THROUGHPUT", "DEFAULT"} {
		steps = append(steps,
			resource.TestStep{
				Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, variant),
			},
			resource.TestStep{
				ResourceName:      "google_container_cluster.with_logging_variant_node_pool_default",
				ImportState:       true,
				ImportStateVerify: true,
			},
		)
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
		Steps:        steps,
	})
}

<% unless version == 'ga' -%>
func TestAccContainerCluster_withNodePoolDefaults(t *testing.T) {
t.Parallel()
Expand Down Expand Up @@ -4301,6 +4378,53 @@ resource "google_container_cluster" "with_node_config" {
`, clusterName)
}

// testAccContainerCluster_withLoggingVariantInNodeConfig renders a Terraform
// config for a cluster that sets the given logging variant on its default
// node pool via node_config.
func testAccContainerCluster_withLoggingVariantInNodeConfig(name, variant string) string {
	const tpl = `
resource "google_container_cluster" "with_logging_variant_in_node_config" {
  name = "%s"
  location = "us-central1-f"
  initial_node_count = 1

  node_config {
    logging_variant = "%s"
  }
}
`
	return fmt.Sprintf(tpl, name, variant)
}

// testAccContainerCluster_withLoggingVariantInNodePool renders a Terraform
// config for a cluster with one inline node pool whose node_config carries
// the given logging variant.
func testAccContainerCluster_withLoggingVariantInNodePool(cluster, pool, variant string) string {
	const tpl = `
resource "google_container_cluster" "with_logging_variant_in_node_pool" {
  name = "%s"
  location = "us-central1-f"

  node_pool {
    name = "%s"
    initial_node_count = 1
    node_config {
      logging_variant = "%s"
    }
  }
}
`
	return fmt.Sprintf(tpl, cluster, pool, variant)
}

// testAccContainerCluster_withLoggingVariantNodePoolDefault renders a
// Terraform config for a cluster that sets the given logging variant as the
// cluster-wide default via node_pool_defaults.node_config_defaults.
func testAccContainerCluster_withLoggingVariantNodePoolDefault(name, variant string) string {
	const tpl = `
resource "google_container_cluster" "with_logging_variant_node_pool_default" {
  name = "%s"
  location = "us-central1-f"
  initial_node_count = 1

  node_pool_defaults {
    node_config_defaults {
      logging_variant = "%s"
    }
  }
}
`
	return fmt.Sprintf(tpl, name, variant)
}

<% unless version == 'ga' -%>
func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled string) string {
return fmt.Sprintf(`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,45 @@ func TestAccContainerNodePool_noName(t *testing.T) {
})
}

// TestAccContainerNodePool_withLoggingVariantUpdates cycles a node pool's
// logging_variant through DEFAULT -> MAX_THROUGHPUT -> DEFAULT, importing
// and verifying the node pool resource after each apply.
func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) {
	t.Parallel()

	cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
	nodePool := fmt.Sprintf("tf-test-nodepool-%s", randString(t, 10))

	// Each variant apply is followed by an import-and-verify step.
	var steps []resource.TestStep
	for _, variant := range []string{"DEFAULT", "MAX_THROUGHPUT", "DEFAULT"} {
		steps = append(steps,
			resource.TestStep{
				Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, variant),
			},
			resource.TestStep{
				ResourceName:      "google_container_node_pool.with_logging_variant",
				ImportState:       true,
				ImportStateVerify: true,
			},
		)
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t),
		Steps:        steps,
	})
}

func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
t.Parallel()

Expand Down Expand Up @@ -1267,6 +1306,26 @@ resource "google_container_node_pool" "np" {
`, cluster, np)
}

// testAccContainerNodePool_withLoggingVariant renders a Terraform config for
// a cluster plus a separately-managed node pool whose node_config carries the
// given logging variant.
func testAccContainerNodePool_withLoggingVariant(cluster, pool, variant string) string {
	const tpl = `
resource "google_container_cluster" "with_logging_variant" {
  name = "%s"
  location = "us-central1-a"
  initial_node_count = 1
}

resource "google_container_node_pool" "with_logging_variant" {
  name = "%s"
  location = "us-central1-a"
  cluster = google_container_cluster.with_logging_variant.name
  initial_node_count = 1
  node_config {
    logging_variant = "%s"
  }
}
`
	return fmt.Sprintf(tpl, cluster, pool, variant)
}

func testAccContainerNodePool_basicWithClusterId(cluster, np string) string {
return fmt.Sprintf(`
provider "google" {
Expand Down

0 comments on commit 8ed44da

Please sign in to comment.