From 546c7304907243945935e4cd578a39ac8d628113 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Wed, 13 Jul 2022 19:03:39 -0300 Subject: [PATCH 01/27] Initial commit --- 1-org/envs/shared/log_sinks.tf | 127 +++------ 1-org/modules/centralized-logging/README.md | 100 +++++++ 1-org/modules/centralized-logging/main.tf | 264 ++++++++++++++++++ 1-org/modules/centralized-logging/outputs.tf | 34 +++ .../modules/centralized-logging/variables.tf | 165 +++++++++++ 1-org/modules/centralized-logging/versions.tf | 19 ++ 6 files changed, 614 insertions(+), 95 deletions(-) create mode 100644 1-org/modules/centralized-logging/README.md create mode 100644 1-org/modules/centralized-logging/main.tf create mode 100644 1-org/modules/centralized-logging/outputs.tf create mode 100644 1-org/modules/centralized-logging/variables.tf create mode 100644 1-org/modules/centralized-logging/versions.tf diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index 02dccc000..5cd6b3b71 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -15,106 +15,43 @@ */ locals { - parent_resource_id = var.parent_folder != "" ? var.parent_folder : var.org_id - parent_resource_type = var.parent_folder != "" ? "folder" : "organization" - main_logs_filter = < 0 ? "project" : (var.parent_folder != "" ? "folder" : "organization") } -/****************************************** - Send logs to Pub\Sub -*****************************************/ - -module "log_export_to_pubsub" { - source = "terraform-google-modules/log-export/google" - version = "~> 7.3.0" - destination_uri = module.pubsub_destination.destination_uri - filter = local.main_logs_filter - log_sink_name = "sk-c-logging-pub" - parent_resource_id = local.parent_resource_id - parent_resource_type = local.parent_resource_type - include_children = true - unique_writer_identity = true -} +module "centralized_logging" { + source = "../../modules/centralized-logging" + projects_ids = local.projects_ids + logging_destination_project_id = module.org_audit_logs.project_id + kms_project_id = module.org_audit_logs.project_id + bucket_name = "bkt-logging-${module.org_audit_logs.project_id}" + bigquery_name = "bq-logging-${module.org_audit_logs.project_id}" + pubsub_name = "ps-logging-${module.org_audit_logs.project_id}" + logging_location = var.default_region + delete_contents_on_destroy = var.delete_contents_on_destroy + // key_rotation_period_seconds = local.key_rotation_period_seconds -module "pubsub_destination" { - source = "terraform-google-modules/log-export/google//modules/pubsub" - version = "~> 7.3.0" - project_id = module.org_audit_logs.project_id - topic_name = "tp-org-logs-${random_string.suffix.result}" - log_sink_writer_identity = module.log_export_to_pubsub.writer_identity - create_subscriber = true + depends_on = [ + module.org_audit_logs, + module.org_billing_logs, + module.org_secrets, + module.interconnect, + module.scc_notifications, + module.dns_hub, + module.base_network_hub, + module.restricted_network_hub + ] } /****************************************** diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md new file mode 100644 index 000000000..c7ce69855 --- /dev/null +++ b/1-org/modules/centralized-logging/README.md @@ -0,0 +1,100 @@ +# Centralized Logging Module + +This module handles logging configuration enabling destination to: buckets, Big Query or Pub/Sub. 
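A minimal sketch of how this module could be wired up is shown below, assuming a Cloud Storage destination; the project IDs and the relative source path are hypothetical placeholders, and the exact set of required inputs may vary between revisions of this module (see the Inputs table further down).

```hcl
module "centralized_logging" {
  source = "../../modules/centralized-logging"

  # Hypothetical values for illustration only.
  resources = {
    prj1 = "example-project-one"
    prj2 = "example-project-two"
  }
  resource_type                  = "project"
  logging_destination_project_id = "example-logging-project"
  logging_target_type            = "storage"
  kms_project_id                 = "example-logging-project"
  logging_location               = "us-east4"
}
```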
+ +## Usage + +Before using this module, one should get familiar with the `google_dataflow_flex_template_job`’s [Note on "destroy"/"apply"](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/dataflow_flex_template_job#note-on-destroy--apply) as the behavior is atypical when compared to other resources. + +## Requirements + +These sections describe requirements for running this module. + +### Software + +Install the following dependencies: + +- [Google Cloud SDK](https://cloud.google.com/sdk/install) version 357.0.0 or later. +- [Terraform](https://www.terraform.io/downloads.html) version 0.13.7 or later. + +### Deployer entity + +To provision the resources of this module, create a service account +with the following IAM roles: + +- Dataflow Developer:`roles/dataflow.developer`. + +### APIs + +The following APIs must be enabled in the project where the service account was created: + +- BigQuery API: `bigquery.googleapis.com`. +- Cloud Key Management Service (KMS) API: `cloudkms.googleapis.com`. +- Google Cloud Storage JSON API:`storage-api.googleapis.com`. +- Compute Engine API: `compute.googleapis.com`. +- Dataflow API: `dataflow.googleapis.com`. + +Any others APIs you pipeline may need. + +### Assumption + +One assumption is that, before using this module, you already have a working Dataflow flex job template(s) in a GCS location. +If you are not using public IPs, you need to [Configure Private Google Access](https://cloud.google.com/vpc/docs/configure-private-google-access) +on the VPC used by Dataflow. + +This is a simple usage: + +```hcl +module "dataflow-flex-job" { + source = "terraform-google-modules/secured-data-warehouse/google//modules/dataflow-flex-job" + version = "~> 0.1" + + project_id = "" + region = "us-east4" + name = "dataflow-flex-job-00001" + container_spec_gcs_path = "gs://" + staging_location = "gs://" + temp_location = "gs://" + subnetwork_self_link = "" + kms_key_name = "" + service_account_email = "" + + parameters = { + firstParameter = "ONE", + secondParameter = "TWO + } +} +``` + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| audit\_logs\_table\_delete\_contents\_on\_destroy | (Optional) If set to true, delete all the tables in the dataset when destroying the resource; otherwise, destroying the resource will fail if tables are present. | `bool` | `false` | no | +| audit\_logs\_table\_expiration\_days | Period before tables expire for all audit logs in milliseconds. Default is 30 days. | `number` | `30` | no | +| bigquery\_options | (Optional) Options that affect sinks exporting data to BigQuery. use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables. |
<pre>object({<br>  use_partitioned_tables = bool<br>})</pre>
| `null` | no | +| data\_access\_logs\_enabled | Enable Data Access logs of types DATA\_READ, DATA\_WRITE for all GCP services in the projects specified in the provided `projects_ids` map. Enabling Data Access logs might result in your organization being charged for the additional logs usage. See https://cloud.google.com/logging/docs/audit#data-access The ADMIN\_READ logs are enabled by default. | `bool` | `false` | no | +| delete\_contents\_on\_destroy | (Optional) If set to true, disable the prevent destroy protection in the KMS keys. | `bool` | `false` | no | +| exclusions | (Optional) A list of sink exclusion filters. |
<pre>list(object({<br>  name = string,<br>  description = string,<br>  filter = string,<br>  disabled = bool<br>}))</pre>
| `[]` | no | +| key\_rotation\_period\_seconds | Rotation period for keys. The default value is 30 days. | `string` | `"2592000s"` | no | +| kms\_key\_protection\_level | The protection level to use when creating a key. Possible values: ["SOFTWARE", "HSM"] | `string` | `"HSM"` | no | +| kms\_project\_id | The ID of the project in which the Cloud KMS keys will be created. | `string` | n/a | yes | +| labels | (Optional) Labels attached to Data Warehouse resources. | `map(string)` | `{}` | no | +| log\_export\_storage\_force\_destroy | (Optional) If set to true, delete all contents when destroying the resource; otherwise, destroying the resource will fail if contents are present. | `bool` | `false` | no | +| log\_export\_storage\_retention\_policy | Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. |
<pre>object({<br>  is_locked = bool<br>  retention_period_days = number<br>})</pre>
| `null` | no | +| log\_export\_storage\_versioning | (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. | `bool` | `false` | no | +| logging\_create\_target | (Optional) If set to true, the module will create a container (bigquery, storage, or pubsub); otherwise, the module will consider that the container already exists. | `bool` | `false` | no | +| logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | +| logging\_location | A valid location for the bucket and KMS key that will be deployed. | `string` | `"us-east4"` | no | +| logging\_target\_name | The name of the logging container (bigquery, storage, or pubsub) that will store the logs. | `string` | `""` | no | +| logging\_target\_type | Resource type of the resource that will store the logs. Must be: bigquery, storage, or pubsub | `string` | n/a | yes | +| resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization or folder | `string` | n/a | yes | +| resources | Export logs from the specified resources. | `map(string)` | n/a | yes | +| sink\_filter | The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs. | `string` | `" logName: /logs/cloudaudit.googleapis.com%2Factivity OR\n logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR\n logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR\n logName: /logs/compute.googleapis.com%2Fvpc_flows OR\n logName: /logs/compute.googleapis.com%2Ffirewall OR\n logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency\n"` | no | + +## Outputs + +No output. + + diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf new file mode 100644 index 000000000..48e10987d --- /dev/null +++ b/1-org/modules/centralized-logging/main.tf @@ -0,0 +1,264 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + // Get resources map first entry to create de unique log_export destination + log_export_resource_dest = [for k, v in var.resources : v if k == keys(var.resources)[0]][0] + + /** + * Create a new resources map without the first entry (explained above) to create + * log_export and assing log writer identities required permissions + */ + log_export_other_resources = { for k, v in var.resources : k => v if k != keys(var.resources)[0] } + + destination_uri = var.logging_target_type == "bigquery" ? module.destination_bigquery[0].destination_uri : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].destination_uri : module.destination_storage[0].destination_uri + + # Bigquery sink options + bigquery_options = var.logging_target_type == "bigquery" && var.bigquery_options != null ? 
var.bigquery_options : null + + //----------------------------- + // new_bucket_name = "${var.bucket_name}-${random_string.suffix.result}" + // bucket_name = var.create_bucket ? module.logging_bucket[0].bucket.name : var.bucket_name + + // destination_uri2 = "storage.googleapis.com/${local.bucket_name}" + storage_sa = data.google_storage_project_service_account.gcs_account.email_address + logging_keyring_name = "logging_keyring_${random_string.suffix.result}" + logging_key_name = "logging_key" + keys = [local.logging_key_name] + enabling_data_logs = var.data_access_logs_enabled ? ["DATA_WRITE", "DATA_READ"] : [] + parent_resource_ids = [for parent_resource_id in local.log_exports[*].parent_resource_id : parent_resource_id] + + log_exports_first = toset([ + for value in module.log_export : value + ]) + + log_exports_others = toset([ + for value in module.log_export_other_resources : value + ]) + + log_exports = setunion(local.log_exports_first, local.log_exports_others) +} + +resource "random_string" "suffix" { + length = 8 + upper = false + special = false +} + +data "google_storage_project_service_account" "gcs_account" { + project = var.logging_destination_project_id +} + +module "cmek" { + source = "terraform-google-modules/kms/google" + version = "~> 2.0.1" + + count = var.create_bucket ? 1 : 0 + + project_id = var.kms_project_id + labels = var.labels + location = var.logging_location + keyring = local.logging_keyring_name + key_rotation_period = var.key_rotation_period_seconds + keys = local.keys + key_protection_level = var.kms_key_protection_level + set_encrypters_for = local.keys + set_decrypters_for = local.keys + encrypters = ["serviceAccount:${local.storage_sa}"] + decrypters = ["serviceAccount:${local.storage_sa}"] + prevent_destroy = !var.delete_contents_on_destroy +} + +module "log_export" { + source = "terraform-google-modules/log-export/google" + version = "~> 7.3.0" + // version = "~> 7.1.0" + + destination_uri = local.destination_uri + filter = var.sink_filter + log_sink_name = "sk-logging-to-${var.logging_destination_project_id}" + parent_resource_id = local.log_export_resource_dest + parent_resource_type = var.resource_type + unique_writer_identity = true + include_children = true + bigquery_options = local.bigquery_options + exclusions = var.exclusions +} + +module "log_export_other_resources" { + source = "terraform-google-modules/log-export/google" + version = "~> 7.3.0" + // version = "~> 7.1.0" + + for_each = local.log_export_other_resources + + destination_uri = local.destination_uri + filter = var.sink_filter + log_sink_name = "sk-logging-to-${var.logging_destination_project_id}" + parent_resource_id = each.value + parent_resource_type = var.resource_type + unique_writer_identity = true +} + +/****************************************** + Send logs to BigQuery +*****************************************/ +module "destination_bigquery" { + source = "terraform-google-modules/log-export/google//modules/bigquery" + version = "~> 7.3.0" + + count = var.logging_target_type == "bigquery" ? 
1 : 0 + + project_id = var.logging_destination_project_id + dataset_name = "bq_logging" + log_sink_writer_identity = each.value.writer_identity + expiration_days = var.audit_logs_table_expiration_days + delete_contents_on_destroy = var.audit_logs_table_delete_contents_on_destroy +} + +#-----------------------------------------# +# Bigquery Service account IAM membership # +#-----------------------------------------# +resource "google_project_iam_member" "bigquery_sink_member" { + for_each = var.logging_target_type == "bigquery" ? local.log_export_other_resources : {} + + project = var.logging_destination_project_id + role = "roles/bigquery.dataEditor" + member = module.log_export_other_resources[each.key].writer_identity +} + +/****************************************** + Send logs to Storage +*****************************************/ +module "destination_storage" { + source = "terraform-google-modules/log-export/google//modules/storage" + version = "~> 7.3.0" + + count = var.logging_target_type == "storage" ? 1 : 0 + + project_id = var.logging_destination_project_id + storage_bucket_name = "bkt-${var.logging_destination_project_id}-org-logs-${random_string.suffix.result}" + log_sink_writer_identity = module.log_export.writer_identity + uniform_bucket_level_access = true + location = var.logging_location + // retention_policy = var.log_export_retention_policy + // force_destroy = var.log_export_force_destroy + // versioning = var.log_export_versioning +} + +#----------------------------------------# +# Storage Service account IAM membership # +#----------------------------------------# +resource "google_storage_bucket_iam_member" "storage_sink_member" { + for_each = var.logging_target_type == "storage" ? module.log_export_other_resources : {} + + bucket = module.destination_storage[0].resource_name + role = "roles/storage.objectCreator" + member = each.value.writer_identity +} + +/****************************************** + Send logs to Pub\Sub +*****************************************/ +module "destination_pubsub" { + source = "terraform-google-modules/log-export/google//modules/pubsub" + version = "~> 7.3.0" + + count = var.logging_target_type == "pubsub" ? 1 : 0 + + project_id = var.logging_destination_project_id + topic_name = "tp-org-logs-${random_string.suffix.result}" + log_sink_writer_identity = module.log_export.writer_identity + create_subscriber = true +} + +#---------------------------------------# +# Pubsub Service account IAM membership # +#---------------------------------------# +resource "google_pubsub_topic_iam_member" "pubsub_sink_member" { + for_each = var.logging_target_type == "pubsub" ? module.log_export_other_resources : {} + + project = var.logging_destination_project_id + topic = module.destination_pubsub[0].resource_name + role = "roles/pubsub.publisher" + member = each.value.writer_identity +} + + + +//------------------------------------------------ +/* +module "log_export_to_bucket" { + source = "terraform-google-modules/log-export/google" + version = "~> 7.1.0" + + for_each = lookup(local.resources_map, var.logging_target_type, {}) + + destination_uri = local.destination_uri + filter = var.sink_filter + log_sink_name = "sk-dwh-logging-bkt2" + parent_resource_id = each.value + parent_resource_type = var.resource_type + unique_writer_identity = true +} + +module "logging_bucket" { + source = "terraform-google-modules/cloud-storage/google//modules/simple_bucket" + version = "~> 2.1" + + count = var.create_bucket ? 
1 : 0 + + name = local.new_bucket_name + project_id = var.logging_destination_project_id + location = var.logging_location + force_destroy = true + encryption = { + default_kms_key_name = module.cmek[0].keys[local.logging_key_name] + } +} + +resource "google_storage_bucket_iam_member" "storage_sink_member2" { + for_each = module.log_export_to_bucket + + bucket = local.bucket_name + role = "roles/storage.objectCreator" + member = each.value.writer_identity +} + +/* TODO quando nao for projeto? */ +/* +resource "google_project_iam_audit_config" "project_config" { + for_each = var.resources + + project = "projects/${each.value}" + service = "allServices" + + ################################################################################################### + ### Audit logs can generate costs, to know more about it, + ### check the official documentation: https://cloud.google.com/stackdriver/pricing#logging-costs + ### To know more about audit logs, you can find more infos + ### here https://cloud.google.com/logging/docs/audit/configure-data-access + ### To enable DATA_READ and DATA_WRITE audit logs, set `data_access_logs_enabled` to true + ### ADMIN_READ logs are enabled by default. + #################################################################################################### + dynamic "audit_log_config" { + for_each = setunion(local.enabling_data_logs, ["ADMIN_READ"]) + content { + log_type = audit_log_config.key + } + } +} +*/ diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf new file mode 100644 index 000000000..2e6b3e2e7 --- /dev/null +++ b/1-org/modules/centralized-logging/outputs.tf @@ -0,0 +1,34 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +output "sinks" { + description = "The list of sinks that were created." + value = toset([ + for value in module.log_exports : value + ]) +} + +output "bucket_name" { + description = "The name of the bucket that will store the exported logs." + value = local.bucket_name +} + +output "sink_projects" { + description = "The list of the projects that the sink was created." + value = local.parent_resource_ids +} +*/ diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf new file mode 100644 index 000000000..95ae29356 --- /dev/null +++ b/1-org/modules/centralized-logging/variables.tf @@ -0,0 +1,165 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "resource_type" { + description = "Resource type of the resource that will export logs to destination. Must be: project, organization or folder" + type = string + + validation { + condition = contains(["project", "folder", "organization"], var.resource_type) + error_message = "The resource_type value must be: project, organization or folder." + } +} + +variable "resources" { + description = "Export logs from the specified resources." + type = map(string) + + validation { + condition = length(var.resources) > 0 + error_message = "The resources map should have at least 1 item." + } +} + +variable "logging_destination_project_id" { + description = "The ID of the project that will have the resources where the logs will be created." + type = string +} + +variable "logging_target_type" { + description = "Resource type of the resource that will store the logs. Must be: bigquery, storage, or pubsub" + type = string + + validation { + condition = contains(["bigquery", "storage", "pubsub"], var.logging_target_type) + error_message = "The logging_target_type value must be: bigquery, storage, or pubsub." + } +} + +variable "logging_location" { + description = "A valid location for the bucket and KMS key that will be deployed." + type = string + default = "us-east4" +} + +variable "logging_sink_name" { + description = "The name of the log sink to be created." + type = string +} + +variable "logging_sink_filter" { + description = "The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs." + type = string + default = "" +} + +variable "logging_target_name" { + description = "The name of the logging container (bigquery, storage, or pubsub) that will store the logs." + type = string + default = "" +} + +variable "logging_destination_uri" { + description = "The self_link URI of the destination resource (This is available as an output coming from one of the destination submodules)" + type = string +} + +variable "kms_project_id" { + description = "The ID of the project in which the Cloud KMS keys will be created." + type = string +} + +variable "delete_contents_on_destroy" { + description = "(Optional) If set to true, disable the prevent destroy protection in the KMS keys." + type = bool + default = false +} + +variable "key_rotation_period_seconds" { + description = "Rotation period for keys. The default value is 30 days." + type = string + default = "2592000s" +} + +variable "kms_key_protection_level" { + description = "The protection level to use when creating a key. Possible values: [\"SOFTWARE\", \"HSM\"]" + type = string + default = "HSM" +} + +variable "data_access_logs_enabled" { + description = "Enable Data Access logs of types DATA_READ, DATA_WRITE for all GCP services in the projects specified in the provided `projects_ids` map. Enabling Data Access logs might result in your organization being charged for the additional logs usage. See https://cloud.google.com/logging/docs/audit#data-access The ADMIN_READ logs are enabled by default." + type = bool + default = false +} + +variable "audit_logs_table_expiration_days" { + description = "Period before tables expire for all audit logs in milliseconds. Default is 30 days." 
+ type = number + default = 30 +} + +variable "log_export_storage_force_destroy" { + description = "(Optional) If set to true, delete all contents when destroying the resource; otherwise, destroying the resource will fail if contents are present." + type = bool + default = false +} + +variable "log_export_storage_versioning" { + description = "(Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted." + type = bool + default = false +} + +variable "audit_logs_table_delete_contents_on_destroy" { + description = "(Optional) If set to true, delete all the tables in the dataset when destroying the resource; otherwise, destroying the resource will fail if tables are present." + type = bool + default = false +} + +variable "log_export_storage_retention_policy" { + description = "Configuration of the bucket's data retention policy for how long objects in the bucket should be retained." + type = object({ + is_locked = bool + retention_period_days = number + }) + default = null +} + +variable "bigquery_options" { + description = "(Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables." + type = object({ + use_partitioned_tables = bool + }) + default = null +} + +variable "exclusions" { + description = "(Optional) A list of sink exclusion filters." + type = list(object({ + name = string, + description = string, + filter = string, + disabled = bool + })) + default = [] +} + +variable "labels" { + description = "(Optional) Labels attached to Data Warehouse resources." + type = map(string) + default = {} +} diff --git a/1-org/modules/centralized-logging/versions.tf b/1-org/modules/centralized-logging/versions.tf new file mode 100644 index 000000000..a9686ee70 --- /dev/null +++ b/1-org/modules/centralized-logging/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +terraform { + required_version = ">= 0.13" +} From 8322486bf2672cc5f19d2c447cb141fb2771aa90 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Wed, 10 Aug 2022 12:23:21 -0300 Subject: [PATCH 02/27] New inline Centralized Logging module --- 1-org/modules/centralized-logging/README.md | 115 +++++++++ 1-org/modules/centralized-logging/main.tf | 185 ++++++++++++++ 1-org/modules/centralized-logging/outputs.tf | 50 ++++ .../modules/centralized-logging/variables.tf | 235 ++++++++++++++++++ 1-org/modules/centralized-logging/versions.tf | 19 ++ 5 files changed, 604 insertions(+) create mode 100644 1-org/modules/centralized-logging/README.md create mode 100644 1-org/modules/centralized-logging/main.tf create mode 100644 1-org/modules/centralized-logging/outputs.tf create mode 100644 1-org/modules/centralized-logging/variables.tf create mode 100644 1-org/modules/centralized-logging/versions.tf diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md new file mode 100644 index 000000000..d01e150dc --- /dev/null +++ b/1-org/modules/centralized-logging/README.md @@ -0,0 +1,115 @@ +# Centralized Logging Module + +This module handles logging configuration enabling destination to: buckets, Big Query or Pub/Sub. + +## Usage + +Before using this module, one should get familiar with the `google_dataflow_flex_template_job`’s [Note on "destroy"/"apply"](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/dataflow_flex_template_job#note-on-destroy--apply) as the behavior is atypical when compared to other resources. + +## Requirements + +These sections describe requirements for running this module. + +### Software + +Install the following dependencies: + +- [Google Cloud SDK](https://cloud.google.com/sdk/install) version 357.0.0 or later. +- [Terraform](https://www.terraform.io/downloads.html) version 0.13.7 or later. + +### Deployer entity + +To provision the resources of this module, create a service account +with the following IAM roles: + +- Dataflow Developer:`roles/dataflow.developer`. + +### APIs + +The following APIs must be enabled in the project where the service account was created: + +- BigQuery API: `bigquery.googleapis.com`. +- Cloud Key Management Service (KMS) API: `cloudkms.googleapis.com`. +- Google Cloud Storage JSON API:`storage-api.googleapis.com`. +- Compute Engine API: `compute.googleapis.com`. +- Dataflow API: `dataflow.googleapis.com`. + +Any others APIs you pipeline may need. + +### Assumption + +One assumption is that, before using this module, you already have a working Dataflow flex job template(s) in a GCS location. +If you are not using public IPs, you need to [Configure Private Google Access](https://cloud.google.com/vpc/docs/configure-private-google-access) +on the VPC used by Dataflow. + +This is a simple usage: + +```hcl +module "dataflow-flex-job" { + source = "terraform-google-modules/secured-data-warehouse/google//modules/dataflow-flex-job" + version = "~> 0.1" + + project_id = "" + region = "us-east4" + name = "dataflow-flex-job-00001" + container_spec_gcs_path = "gs://" + staging_location = "gs://" + temp_location = "gs://" + subnetwork_self_link = "" + kms_key_name = "" + service_account_email = "" + + parameters = { + firstParameter = "ONE", + secondParameter = "TWO + } +} +``` + + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| bigquery\_options | (Optional) Options that affect sinks exporting data to BigQuery. 
use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables. |
<pre>object({<br>  use_partitioned_tables = bool<br>})</pre>
| `null` | no | +| create\_push\_subscriber | (Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. | `bool` | `false` | no | +| create\_subscriber | (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. | `bool` | `false` | no | +| dataset\_description | (Optional) A user-friendly description of the dataset. Applies to destination: bigquery. | `string` | `""` | no | +| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to destination: bigquery and storage. | `bool` | `false` | no | +| exclusions | (Optional) A list of sink exclusion filters. |
<pre>list(object({<br>  name = string,<br>  description = string,<br>  filter = string,<br>  disabled = bool<br>}))</pre>
| `[]` | no | +| expiration\_days | (Optional) Table expiration time. If null logs will never be deleted. Applies to destination: bigquery. | `number` | `null` | no | +| include\_children | Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included. | `bool` | `false` | no | +| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. | `string` | `null` | no | +| labels | (Optional) Labels attached to logging resources. | `map(string)` | `{}` | no | +| lifecycle\_rules | (Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches\_storage\_class should be a comma delimited string. Applies to destination: storage. |
<pre>set(object({<br>  # Object with keys:<br>  # - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass.<br>  # - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule.<br>  action = map(string)<br><br>  # Object with keys:<br>  # - age - (Optional) Minimum age of an object in days to satisfy this condition.<br>  # - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.<br>  # - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY".<br>  # - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY.<br>  # - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.<br>  # - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true.<br>  condition = map(string)<br>}))</pre>
| `[]` | no | +| logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | +| logging\_destination\_uri | The self\_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources. | `string` | `""` | no | +| logging\_location | (Optional) The location of the logging destination. Applies to destination: bigquery and storage. | `string` | `"US"` | no | +| logging\_project\_key | (Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource\_type = project and logging\_target\_type = logbucket. | `string` | `""` | no | +| logging\_sink\_filter | The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs. | `string` | `""` | no | +| logging\_sink\_name | The name of the log sink to be created. | `string` | `""` | no | +| logging\_target\_name | The name of the logging container (logbucket, bigquery-dataset, storage, or pubsub-topic) that will store the logs. | `string` | `""` | no | +| logging\_target\_type | Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub | `string` | n/a | yes | +| push\_endpoint | (Optional) The URL locating the endpoint to which messages should be pushed. Applies to destination: pubsub. | `string` | `""` | no | +| resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization, or folder | `string` | n/a | yes | +| resources | Export logs from the specified resources. | `map(string)` | n/a | yes | +| retention\_days | (Optional) The number of days data should be retained for the log bucket. Applies to destination: logbucket. | `number` | `30` | no | +| retention\_policy | (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to destination: storage. |
<pre>object({<br>  is_locked = bool<br>  retention_period_days = number<br>})</pre>
| `null` | no | +| storage\_class | (Optional) The storage class of the storage bucket. Applies to destination: storage. | `string` | `"STANDARD"` | no | +| subscriber\_id | (Optional) The ID to give the pubsub pull subscriber service account. Applies to destination: pubsub. | `string` | `""` | no | +| subscription\_labels | (Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to destination: pubsub. | `map(string)` | `{}` | no | +| uniform\_bucket\_level\_access | (Optional) Enables Uniform bucket-level access to a bucket. Applies to destination: storage. | `bool` | `true` | no | +| versioning | (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to destination: storage. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| destination\_uri | n/a | +| filter | The filter to be applied when exporting logs. | +| log\_sinks\_id | The resource ID of the log sink that was created. | +| log\_sinks\_name | The resource name of the log sink that was created. | +| parent\_resource\_ids | The ID of the GCP resource in which you create the log sink. | +| resource\_name | The resource name for the destination | + + diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf new file mode 100644 index 000000000..e76431685 --- /dev/null +++ b/1-org/modules/centralized-logging/main.tf @@ -0,0 +1,185 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +locals { + key_first_resource = keys(var.resources)[0] + logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key } + resource_name = var.logging_target_type == "bigquery" ? module.destination_bigquery[0].resource_name : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].resource_name : var.logging_target_type == "storage" ? module.destination_storage[0].resource_name : module.destination_logbucket[0].resource_name + destination_uri = length(var.logging_destination_uri) > 0 ? var.logging_destination_uri : var.logging_target_type == "bigquery" ? module.destination_bigquery[0].destination_uri : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].destination_uri : var.logging_target_type == "storage" ? module.destination_storage[0].destination_uri : module.destination_logbucket[0].destination_uri + create_destination = !(length(var.logging_destination_uri) > 0) + logging_sink_name = length(var.logging_sink_name) > 0 ? var.logging_sink_name : "sk-to-${local.logging_target_name_prefix}-${var.logging_destination_project_id}" + logging_target_name_prefix = var.logging_target_type == "bigquery" ? "ds" : var.logging_target_type == "pubsub" ? "topic" : var.logging_target_type == "storage" ? "bkt" : "logbkt" + logging_target_name = length(var.logging_target_name) > 0 ? 
var.logging_target_name : "${local.logging_target_name_prefix}-${random_string.suffix.result}" + log_exports = setunion(local.log_exports_others) + parent_resource_ids = [for parent_resource_id in local.log_exports[*].parent_resource_id : parent_resource_id] + + # Bigquery sink options + bigquery_options = var.logging_target_type == "bigquery" && var.bigquery_options != null ? var.bigquery_options : null + + log_exports_others = toset([ + for value in module.log_export : value + ]) +} + +resource "random_string" "suffix" { + length = 8 + upper = false + special = false +} + +module "log_export" { + source = "terraform-google-modules/log-export/google" + version = "~> 7.3.0" + + for_each = var.resources + + destination_uri = local.destination_uri + filter = var.logging_sink_filter + log_sink_name = local.logging_sink_name + parent_resource_id = each.value + parent_resource_type = var.resource_type + unique_writer_identity = true + include_children = var.include_children + bigquery_options = local.bigquery_options + exclusions = var.exclusions +} + + +#-------------------------# +# Send logs to Log Bucket # +#-------------------------# +module "destination_logbucket" { + // source = "terraform-google-modules/log-export/google//modules/logbucket" + // version = "~> 7.4.0" + + source = "/work/terraform-google-log-export/modules/logbucket" + + count = local.create_destination && var.logging_target_type == "logbucket" ? 1 : 0 + + project_id = var.logging_destination_project_id + name = local.logging_target_name + log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + location = var.logging_location + retention_days = var.retention_days + grant_write_permission_on_bkt = false +} + +#-------------------------------------------# +# Log Bucket Service account IAM membership # +#-------------------------------------------# +resource "google_project_iam_member" "logbucket_sink_member" { + for_each = var.logging_target_type == "logbucket" ? local.logbucket_sink_member : {} + + project = var.logging_destination_project_id + role = "roles/logging.bucketWriter" + member = module.log_export[each.key].writer_identity +} + +#-----------------------# +# Send logs to BigQuery # +#-----------------------# +module "destination_bigquery" { + source = "terraform-google-modules/log-export/google//modules/bigquery" + version = "~> 7.3.0" + + count = local.create_destination && var.logging_target_type == "bigquery" ? 1 : 0 + + project_id = var.logging_destination_project_id + dataset_name = replace(local.logging_target_name, "-", "_") + log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + labels = var.labels + description = var.dataset_description + expiration_days = var.expiration_days + delete_contents_on_destroy = var.delete_contents_on_destroy +} + +#-----------------------------------------# +# Bigquery Service account IAM membership # +#-----------------------------------------# +resource "google_project_iam_member" "bigquery_sink_member" { + for_each = var.logging_target_type == "bigquery" ? var.resources : {} + + project = var.logging_destination_project_id + role = "roles/bigquery.dataEditor" + member = module.log_export[each.key].writer_identity +} + + +#----------------------# +# Send logs to Storage # +#----------------------# +module "destination_storage" { + source = "terraform-google-modules/log-export/google//modules/storage" + version = "~> 7.3.0" + + count = local.create_destination && var.logging_target_type == "storage" ? 
1 : 0 + + project_id = var.logging_destination_project_id + storage_bucket_name = local.logging_target_name + log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + uniform_bucket_level_access = var.uniform_bucket_level_access + location = var.logging_location + storage_bucket_labels = var.labels + force_destroy = var.delete_contents_on_destroy + retention_policy = var.retention_policy + lifecycle_rules = var.lifecycle_rules + storage_class = var.storage_class + versioning = var.versioning +} + +#----------------------------------------# +# Storage Service account IAM membership # +#----------------------------------------# +resource "google_storage_bucket_iam_member" "storage_sink_member" { + for_each = var.logging_target_type == "storage" ? module.log_export : {} + + bucket = module.destination_storage[0].resource_name + role = "roles/storage.objectCreator" + member = each.value.writer_identity +} + + +#----------------------# +# Send logs to Pub\Sub # +#----------------------# +module "destination_pubsub" { + source = "terraform-google-modules/log-export/google//modules/pubsub" + version = "~> 7.3.0" + + count = local.create_destination && var.logging_target_type == "pubsub" ? 1 : 0 + + project_id = var.logging_destination_project_id + topic_name = local.logging_target_name + log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + topic_labels = var.labels + create_subscriber = var.create_subscriber + subscription_labels = var.subscription_labels + create_push_subscriber = var.create_push_subscriber + push_endpoint = var.push_endpoint + subscriber_id = var.subscriber_id +} + +#---------------------------------------# +# Pubsub Service account IAM membership # +#---------------------------------------# +resource "google_pubsub_topic_iam_member" "pubsub_sink_member" { + for_each = var.logging_target_type == "pubsub" ? module.log_export : {} + + project = var.logging_destination_project_id + topic = module.destination_pubsub[0].resource_name + role = "roles/pubsub.publisher" + member = each.value.writer_identity +} diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf new file mode 100644 index 000000000..b12666d25 --- /dev/null +++ b/1-org/modules/centralized-logging/outputs.tf @@ -0,0 +1,50 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +output "destination_uri" { + value = local.destination_uri +} + +output "filter" { + description = "The filter to be applied when exporting logs." + value = var.logging_sink_filter +} + +output "log_sinks_id" { + description = "The resource ID of the log sink that was created." + value = toset([ + for value in module.log_export : value.log_sink_resource_id + ]) +} + +output "log_sinks_name" { + description = "The resource name of the log sink that was created." 
+ value = toset([ + for value in module.log_export : value.log_sink_resource_name + ]) +} + +output "parent_resource_ids" { + description = "The ID of the GCP resource in which you create the log sink." + value = toset([ + for value in module.log_export : value.parent_resource_id + ]) +} + +output "resource_name" { + description = "The resource name for the destination" + value = local.resource_name +} diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf new file mode 100644 index 000000000..378e76b48 --- /dev/null +++ b/1-org/modules/centralized-logging/variables.tf @@ -0,0 +1,235 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +variable "resources" { + description = "Export logs from the specified resources." + type = map(string) + + validation { + condition = length(var.resources) > 0 + error_message = "The resources map should have at least 1 item." + } +} + +variable "resource_type" { + description = "Resource type of the resource that will export logs to destination. Must be: project, organization, or folder" + type = string + + validation { + condition = contains(["project", "folder", "organization"], var.resource_type) + error_message = "The resource_type value must be: project, organization, or folder." + } +} + +variable "logging_project_key" { + description = "(Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource_type = project and logging_target_type = logbucket." + type = string + default = "" +} + +variable "logging_destination_project_id" { + description = "The ID of the project that will have the resources where the logs will be created." + type = string +} + +variable "logging_target_type" { + description = "Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub" + type = string + + validation { + condition = contains(["bigquery", "storage", "pubsub", "logbucket"], var.logging_target_type) + error_message = "The logging_target_type value must be: logbucket, bigquery, storage, or pubsub." + } +} + +variable "logging_target_name" { + description = "The name of the logging container (logbucket, bigquery-dataset, storage, or pubsub-topic) that will store the logs." + type = string + default = "" +} + +variable "logging_destination_uri" { + description = "The self_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources." + type = string + default = "" +} + +variable "logging_sink_name" { + description = "The name of the log sink to be created." + type = string + default = "" +} + +variable "logging_sink_filter" { + description = "The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs." 
+ type = string + default = "" +} + +variable "include_children" { + description = "Only valid if 'organization' or 'folder' is chosen as var.resource_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included." + type = bool + default = false +} + +variable "exclusions" { + description = "(Optional) A list of sink exclusion filters." + type = list(object({ + name = string, + description = string, + filter = string, + disabled = bool + })) + default = [] +} + +variable "bigquery_options" { + description = "(Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables." + type = object({ + use_partitioned_tables = bool + }) + default = null +} + +variable "labels" { + description = "(Optional) Labels attached to logging resources." + type = map(string) + default = {} +} +variable "kms_key_name" { + description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination." + type = string + default = null +} + +variable "logging_location" { + description = "(Optional) The location of the logging destination. Applies to destination: bigquery and storage." + type = string + default = "US" +} + +variable "delete_contents_on_destroy" { + description = "(Optional) If set to true, delete all contained objects in the logging destination. Applies to destination: bigquery and storage." + type = bool + default = false +} + +#----------------------------- # +# Logbucket specific variables # +#----------------------------- # +variable "retention_days" { + description = "(Optional) The number of days data should be retained for the log bucket. Applies to destination: logbucket." + type = number + default = 30 +} + +#----------------------------- # +# Big Query specific variables # +#----------------------------- # +variable "dataset_description" { + description = "(Optional) A user-friendly description of the dataset. Applies to destination: bigquery." + type = string + default = "" +} + +variable "expiration_days" { + description = "(Optional) Table expiration time. If null logs will never be deleted. Applies to destination: bigquery." + type = number + default = null +} + +#--------------------------- # +# Storage specific variables # +#--------------------------- # +variable "storage_class" { + description = "(Optional) The storage class of the storage bucket. Applies to destination: storage." + type = string + default = "STANDARD" +} + +variable "uniform_bucket_level_access" { + description = "(Optional) Enables Uniform bucket-level access to a bucket. Applies to destination: storage." + type = bool + default = true +} + +variable "lifecycle_rules" { + type = set(object({ + # Object with keys: + # - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass. + # - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule. + action = map(string) + + # Object with keys: + # - age - (Optional) Minimum age of an object in days to satisfy this condition. + # - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition. + # - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY". 
+ # - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY. + # - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition. + # - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true. + condition = map(string) + })) + description = "(Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches_storage_class should be a comma delimited string. Applies to destination: storage." + default = [] +} + +variable "retention_policy" { + description = "(Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to destination: storage." + type = object({ + is_locked = bool + retention_period_days = number + }) + default = null +} + +variable "versioning" { + description = "(Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to destination: storage." + type = bool + default = false +} + +#-------------------------- # +# Pubsub specific variables # +#-------------------------- # +variable "create_subscriber" { + description = "(Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub." + type = bool + default = false +} + +variable "subscriber_id" { + description = "(Optional) The ID to give the pubsub pull subscriber service account. Applies to destination: pubsub." + type = string + default = "" +} + +variable "subscription_labels" { + description = "(Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to destination: pubsub." + type = map(string) + default = {} +} + +variable "create_push_subscriber" { + description = "(Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub." + type = bool + default = false +} + +variable "push_endpoint" { + description = "(Optional) The URL locating the endpoint to which messages should be pushed. Applies to destination: pubsub." + type = string + default = "" +} diff --git a/1-org/modules/centralized-logging/versions.tf b/1-org/modules/centralized-logging/versions.tf new file mode 100644 index 000000000..a9686ee70 --- /dev/null +++ b/1-org/modules/centralized-logging/versions.tf @@ -0,0 +1,19 @@ +/** + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +terraform { + required_version = ">= 0.13" +} From e6dfc6fd4b65e68b53e5fffdf25e40417e730c2f Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 15:11:33 -0300 Subject: [PATCH 03/27] New logbucket destination module path --- 1-org/modules/centralized-logging/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index e76431685..33deeb377 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -63,9 +63,9 @@ module "log_export" { #-------------------------# module "destination_logbucket" { // source = "terraform-google-modules/log-export/google//modules/logbucket" - // version = "~> 7.4.0" + // version = "~> 7.4.2" - source = "/work/terraform-google-log-export/modules/logbucket" + source = "github.com/terraform-google-modules/terraform-google-log-export//modules/logbucket" count = local.create_destination && var.logging_target_type == "logbucket" ? 1 : 0 From 7136d7eab59cfe356b01a74718764ecc39ff40f5 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 15:14:01 -0300 Subject: [PATCH 04/27] -Using new inline centralized logging module for log -Added logbucket as new logging destination --- 1-org/README.md | 2 +- 1-org/envs/shared/log_sinks.tf | 115 +++++++++++++++---------------- 1-org/envs/shared/outputs.tf | 5 ++ README.md | 2 +- test/integration/org/org_test.go | 11 +++ 5 files changed, 74 insertions(+), 61 deletions(-) diff --git a/1-org/README.md b/1-org/README.md index 7403e471a..c306d6070 100644 --- a/1-org/README.md +++ b/1-org/README.md @@ -79,7 +79,7 @@ Enabling Data Access logs might result in your project being charged for the add For details on costs you might incur, go to [Pricing](https://cloud.google.com/stackdriver/pricing). You can choose not to enable the Data Access logs by setting variable `data_access_logs_enabled` to false. -**Note:** This module creates a sink to export all logs to Google Storage. It also creates sinks to export a subset of security related logs +**Note:** This module creates a sink to export all logs to Google Storage and Log Bucket. It also creates sinks to export a subset of security related logs to Bigquery and Pub/Sub. This will result in additional charges for those copies of logs. You can change the filters & sinks by modifying the configuration in `envs/shared/log_sinks.tf`. diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index 02dccc000..d263af7c3 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -17,6 +17,7 @@ locals { parent_resource_id = var.parent_folder != "" ? var.parent_folder : var.org_id parent_resource_type = var.parent_folder != "" ? 
"folder" : "organization" + parent_resources = { resource = local.parent_resource_id } main_logs_filter = < Date: Thu, 11 Aug 2022 15:42:51 -0300 Subject: [PATCH 05/27] Fix missing logbucket name in doc --- 1-org/envs/shared/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/1-org/envs/shared/README.md b/1-org/envs/shared/README.md index 7c178d2ed..9154893b8 100644 --- a/1-org/envs/shared/README.md +++ b/1-org/envs/shared/README.md @@ -70,6 +70,7 @@ | dns\_hub\_project\_id | The DNS hub project ID | | domains\_to\_allow | The list of domains to allow users from in IAM. | | interconnect\_project\_id | The Dedicated Interconnect project ID | +| logs\_export\_logbucket\_name | The log bucket for destination of log exports | | logs\_export\_pubsub\_topic | The Pub/Sub topic for destination of log exports | | logs\_export\_storage\_bucket\_name | The storage bucket for destination of log exports | | org\_audit\_logs\_project\_id | The org audit logs project ID | From 305d306755d9f33f9bf529c3c585fbeeb4ddea52 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 16:06:36 -0300 Subject: [PATCH 06/27] Add support to Cloud KMS CryptoKey --- 1-org/modules/centralized-logging/main.tf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index 33deeb377..0f8c91db1 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -102,6 +102,7 @@ module "destination_bigquery" { log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity labels = var.labels description = var.dataset_description + kms_key_name = var.kms_key_name expiration_days = var.expiration_days delete_contents_on_destroy = var.delete_contents_on_destroy } @@ -130,6 +131,7 @@ module "destination_storage" { project_id = var.logging_destination_project_id storage_bucket_name = local.logging_target_name log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + kms_key_name = var.kms_key_name uniform_bucket_level_access = var.uniform_bucket_level_access location = var.logging_location storage_bucket_labels = var.labels @@ -164,6 +166,7 @@ module "destination_pubsub" { project_id = var.logging_destination_project_id topic_name = local.logging_target_name log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity + kms_key_name = var.kms_key_name topic_labels = var.labels create_subscriber = var.create_subscriber subscription_labels = var.subscription_labels From b0b2e0215374c34ba0921f84fc08f9424a0271a1 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 16:07:06 -0300 Subject: [PATCH 07/27] Fix typos --- 1-org/modules/centralized-logging/README.md | 6 +++--- 1-org/modules/centralized-logging/variables.tf | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index d01e150dc..a0d1ee895 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -75,16 +75,16 @@ module "dataflow-flex-job" { | create\_push\_subscriber | (Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. 
| `bool` | `false` | no | | create\_subscriber | (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. | `bool` | `false` | no | | dataset\_description | (Optional) A user-friendly description of the dataset. Applies to destination: bigquery. | `string` | `""` | no | -| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to destination: bigquery and storage. | `bool` | `false` | no | +| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to destinations: bigquery and storage. | `bool` | `false` | no | | exclusions | (Optional) A list of sink exclusion filters. |
list(object({
name = string,
description = string,
filter = string,
disabled = bool
}))
| `[]` | no | | expiration\_days | (Optional) Table expiration time. If null logs will never be deleted. Applies to destination: bigquery. | `number` | `null` | no | | include\_children | Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included. | `bool` | `false` | no | -| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. | `string` | `null` | no | +| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to destinations: bigquery, storage, and pubsub. | `string` | `null` | no | | labels | (Optional) Labels attached to logging resources. | `map(string)` | `{}` | no | | lifecycle\_rules | (Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches\_storage\_class should be a comma delimited string. Applies to destination: storage. |
set(object({
# Object with keys:
# - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass.
# - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule.
action = map(string)

# Object with keys:
# - age - (Optional) Minimum age of an object in days to satisfy this condition.
# - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.
# - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY".
# - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY.
# - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.
# - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true.
condition = map(string)
}))
| `[]` | no | | logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | | logging\_destination\_uri | The self\_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources. | `string` | `""` | no | -| logging\_location | (Optional) The location of the logging destination. Applies to destination: bigquery and storage. | `string` | `"US"` | no | +| logging\_location | (Optional) The location of the logging destination. Applies to destinations: bigquery and storage. | `string` | `"US"` | no | | logging\_project\_key | (Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource\_type = project and logging\_target\_type = logbucket. | `string` | `""` | no | | logging\_sink\_filter | The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs. | `string` | `""` | no | | logging\_sink\_name | The name of the log sink to be created. | `string` | `""` | no | diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf index 378e76b48..142fb875c 100644 --- a/1-org/modules/centralized-logging/variables.tf +++ b/1-org/modules/centralized-logging/variables.tf @@ -110,19 +110,19 @@ variable "labels" { default = {} } variable "kms_key_name" { - description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination." + description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to destinations: bigquery, storage, and pubsub." type = string default = null } variable "logging_location" { - description = "(Optional) The location of the logging destination. Applies to destination: bigquery and storage." + description = "(Optional) The location of the logging destination. Applies to destinations: bigquery and storage." type = string default = "US" } variable "delete_contents_on_destroy" { - description = "(Optional) If set to true, delete all contained objects in the logging destination. Applies to destination: bigquery and storage." + description = "(Optional) If set to true, delete all contained objects in the logging destination. Applies to destinations: bigquery and storage." type = bool default = false } From 135761ed7083f8d5fed8babd4de0d2a0611374c2 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 18:22:17 -0300 Subject: [PATCH 08/27] Reviewed module documentation --- 1-org/modules/centralized-logging/README.md | 72 +++++---------------- 1 file changed, 16 insertions(+), 56 deletions(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index a0d1ee895..1fa7588b4 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -1,68 +1,28 @@ # Centralized Logging Module -This module handles logging configuration enabling destination to: buckets, Big Query or Pub/Sub. +This module handles logging configuration enabling destination to: GCS bucket, Big Query, Pub/Sub, or Log Bucket. 
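For a destination type not covered by the examples further below, a minimal Pub/Sub sketch built only from the inputs documented in the table above might look like the following (illustrative and untested; the project IDs, resource names, and filter are placeholders):

```hcl
module "logging_pubsub" {
  source = "terraform-google-modules/terraform-example-foundation/google//1-org/modules/centralized-logging"

  # Placeholder resources map: keys are arbitrary labels, values are project IDs.
  resources = {
    prj1 = "example-source-project-id"
  }
  resource_type                  = "project"
  logging_sink_name              = "sk-logging-pubsub-example"
  logging_sink_filter            = "logName: /logs/cloudaudit.googleapis.com%2Factivity"
  logging_target_type            = "pubsub"
  logging_target_name            = "tp-logs-example"
  logging_destination_project_id = "example-logging-project-id"
  create_subscriber              = true
}
```

Per the pubsub-specific inputs above, setting `create_subscriber = true` also provisions a pull subscription and a service account granted `roles/pubsub.subscriber` and `roles/pubsub.viewer` on the topic.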
## Usage -Before using this module, one should get familiar with the `google_dataflow_flex_template_job`’s [Note on "destroy"/"apply"](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/dataflow_flex_template_job#note-on-destroy--apply) as the behavior is atypical when compared to other resources. - -## Requirements - -These sections describe requirements for running this module. - -### Software - -Install the following dependencies: - -- [Google Cloud SDK](https://cloud.google.com/sdk/install) version 357.0.0 or later. -- [Terraform](https://www.terraform.io/downloads.html) version 0.13.7 or later. - -### Deployer entity - -To provision the resources of this module, create a service account -with the following IAM roles: - -- Dataflow Developer:`roles/dataflow.developer`. - -### APIs - -The following APIs must be enabled in the project where the service account was created: - -- BigQuery API: `bigquery.googleapis.com`. -- Cloud Key Management Service (KMS) API: `cloudkms.googleapis.com`. -- Google Cloud Storage JSON API:`storage-api.googleapis.com`. -- Compute Engine API: `compute.googleapis.com`. -- Dataflow API: `dataflow.googleapis.com`. - -Any others APIs you pipeline may need. - -### Assumption - -One assumption is that, before using this module, you already have a working Dataflow flex job template(s) in a GCS location. -If you are not using public IPs, you need to [Configure Private Google Access](https://cloud.google.com/vpc/docs/configure-private-google-access) -on the VPC used by Dataflow. - -This is a simple usage: +The following example exports just audit logs from two folders to the same storage destination: ```hcl -module "dataflow-flex-job" { - source = "terraform-google-modules/secured-data-warehouse/google//modules/dataflow-flex-job" - version = "~> 0.1" - - project_id = "" - region = "us-east4" - name = "dataflow-flex-job-00001" - container_spec_gcs_path = "gs://" - staging_location = "gs://" - temp_location = "gs://" - subnetwork_self_link = "" - kms_key_name = "" - service_account_email = "" +module "logging_storage" { + source = "terraform-google-modules/terraform-example-foundation/google//1-org/modules/centralized-logging" - parameters = { - firstParameter = "ONE", - secondParameter = "TWO + resources = { + fldr1 = "" + fldr2 = "" } + resource_type = "folder" + logging_sink_filter = local.all_logs_filter + logging_sink_name = "sk-c-logging-bkt" + include_children = true + logging_target_type = "storage" + logging_destination_project_id = "" + logging_target_name = "bkt-audit-logs" + uniform_bucket_level_access = true + logging_location = "US" } ``` From 04dab9ad7268cf0b79877042baf019916bef305f Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Thu, 11 Aug 2022 18:25:36 -0300 Subject: [PATCH 09/27] Fix readme log sink filter --- 1-org/modules/centralized-logging/README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index 1fa7588b4..16fec6971 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -15,7 +15,14 @@ module "logging_storage" { fldr2 = "" } resource_type = "folder" - logging_sink_filter = local.all_logs_filter + logging_sink_filter = < Date: Fri, 12 Aug 2022 11:13:19 -0300 Subject: [PATCH 10/27] Fix variable description and improve module documentation --- 1-org/modules/centralized-logging/README.md | 72 +++++++++++++------ 
1-org/modules/centralized-logging/outputs.tf | 3 +- .../modules/centralized-logging/variables.tf | 38 +++++----- 3 files changed, 71 insertions(+), 42 deletions(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index 16fec6971..87e3e0960 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -1,10 +1,12 @@ # Centralized Logging Module -This module handles logging configuration enabling destination to: GCS bucket, Big Query, Pub/Sub, or Log Bucket. +This module handles logging configuration enabling one or more resources such as organization, folders, or projects to send logs to a destination: GCS bucket, Big Query, Pub/Sub, or Log Bucket. ## Usage -The following example exports just audit logs from two folders to the same storage destination: +Before using this module, get familiar with the [log-export](https://registry.terraform.io/modules/terraform-google-modules/log-export/google/latest) module that is the base for it. + +The following example exports audit logs from two folders to the same storage destination: ```hcl module "logging_storage" { @@ -33,46 +35,72 @@ EOF } ``` +Heads up when the destination is a Log Bucket and the logging destination project is also a resource. If it is the case, do not forget to set `logging_project_key` variable with the logging destination project key from map resources. Get more details at [Configure and manage sinks](https://cloud.google.com/logging/docs/export/configure_export_v2#dest-auth:~:text=If%20you%27re%20using%20a%20sink%20to%20route%20logs%20between%20Logging%20buckets%20in%20the%20same%20Cloud%20project%2C%20no%20new%20service%20account%20is%20created%3B%20the%20sink%20works%20without%20the%20unique%20writer%20identity.). + +The following example exports all logs from three projects - including the logging destination project - to a Log Bucket destination. As it exports all logs be aware of additional charges for this amount of logs: + +```hcl +module "logging_logbucket" { + source = "terraform-google-modules/terraform-example-foundation/google//1-org/modules/centralized-logging" + + resources = { + prj1 = "" + prj2 = "" + prjx = "" + } + resource_type = "project" + logging_sink_filter = "" + logging_sink_name = "sk-c-logging-logbkt" + include_children = true + logging_target_type = "logbucket" + logging_destination_project_id = "" + logging_target_name = "logbkt-logs" + uniform_bucket_level_access = true + logging_location = "US" + logging_project_key = "prj1" +} +``` + ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| bigquery\_options | (Optional) Options that affect sinks exporting data to BigQuery. use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables. |
object({
use_partitioned_tables = bool
})
| `null` | no | -| create\_push\_subscriber | (Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. | `bool` | `false` | no | -| create\_subscriber | (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub. | `bool` | `false` | no | -| dataset\_description | (Optional) A user-friendly description of the dataset. Applies to destination: bigquery. | `string` | `""` | no | -| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to destinations: bigquery and storage. | `bool` | `false` | no | +| bigquery\_options | (Optional) Options that affect sinks exporting data to BigQuery. use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables. Applies to logging target type: bigquery. |
object({
use_partitioned_tables = bool
})
| `null` | no | +| create\_push\_subscriber | (Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub. | `bool` | `false` | no | +| create\_subscriber | (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub. | `bool` | `false` | no | +| dataset\_description | (Optional) A user-friendly description of the dataset. Applies to logging target type: bigquery. | `string` | `""` | no | +| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to logging target types: bigquery and storage. | `bool` | `false` | no | | exclusions | (Optional) A list of sink exclusion filters. |
list(object({
name = string,
description = string,
filter = string,
disabled = bool
}))
| `[]` | no | -| expiration\_days | (Optional) Table expiration time. If null logs will never be deleted. Applies to destination: bigquery. | `number` | `null` | no | +| expiration\_days | (Optional) Table expiration time. If null logs will never be deleted. Applies to logging target type: bigquery. | `number` | `null` | no | | include\_children | Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included. | `bool` | `false` | no | -| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to destinations: bigquery, storage, and pubsub. | `string` | `null` | no | +| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to logging target types: bigquery, storage, and pubsub. | `string` | `null` | no | | labels | (Optional) Labels attached to logging resources. | `map(string)` | `{}` | no | -| lifecycle\_rules | (Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches\_storage\_class should be a comma delimited string. Applies to destination: storage. |
set(object({
# Object with keys:
# - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass.
# - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule.
action = map(string)

# Object with keys:
# - age - (Optional) Minimum age of an object in days to satisfy this condition.
# - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.
# - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY".
# - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY.
# - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.
# - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true.
condition = map(string)
}))
| `[]` | no | +| lifecycle\_rules | (Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches\_storage\_class should be a comma delimited string. Applies to logging target type: storage. |
set(object({
# Object with keys:
# - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass.
# - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule.
action = map(string)

# Object with keys:
# - age - (Optional) Minimum age of an object in days to satisfy this condition.
# - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.
# - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY".
# - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY.
# - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.
# - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true.
condition = map(string)
}))
| `[]` | no | | logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | | logging\_destination\_uri | The self\_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources. | `string` | `""` | no | -| logging\_location | (Optional) The location of the logging destination. Applies to destinations: bigquery and storage. | `string` | `"US"` | no | +| logging\_location | (Optional) The location of the logging destination. Applies to logging target types: bigquery and storage. | `string` | `"US"` | no | | logging\_project\_key | (Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource\_type = project and logging\_target\_type = logbucket. | `string` | `""` | no | | logging\_sink\_filter | The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs. | `string` | `""` | no | | logging\_sink\_name | The name of the log sink to be created. | `string` | `""` | no | | logging\_target\_name | The name of the logging container (logbucket, bigquery-dataset, storage, or pubsub-topic) that will store the logs. | `string` | `""` | no | -| logging\_target\_type | Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub | `string` | n/a | yes | -| push\_endpoint | (Optional) The URL locating the endpoint to which messages should be pushed. Applies to destination: pubsub. | `string` | `""` | no | -| resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization, or folder | `string` | n/a | yes | +| logging\_target\_type | Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub. | `string` | n/a | yes | +| push\_endpoint | (Optional) The URL locating the endpoint to which messages should be pushed. Applies to logging target type: pubsub. | `string` | `""` | no | +| resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization, or folder. | `string` | n/a | yes | | resources | Export logs from the specified resources. | `map(string)` | n/a | yes | -| retention\_days | (Optional) The number of days data should be retained for the log bucket. Applies to destination: logbucket. | `number` | `30` | no | -| retention\_policy | (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to destination: storage. |
object({
is_locked = bool
retention_period_days = number
})
| `null` | no | -| storage\_class | (Optional) The storage class of the storage bucket. Applies to destination: storage. | `string` | `"STANDARD"` | no | -| subscriber\_id | (Optional) The ID to give the pubsub pull subscriber service account. Applies to destination: pubsub. | `string` | `""` | no | -| subscription\_labels | (Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to destination: pubsub. | `map(string)` | `{}` | no | -| uniform\_bucket\_level\_access | (Optional) Enables Uniform bucket-level access to a bucket. Applies to destination: storage. | `bool` | `true` | no | -| versioning | (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to destination: storage. | `bool` | `false` | no | +| retention\_days | (Optional) The number of days data should be retained for the log bucket. Applies to logging target type: logbucket. | `number` | `30` | no | +| retention\_policy | (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to logging target type: storage. |
object({
is_locked = bool
retention_period_days = number
})
| `null` | no | +| storage\_class | (Optional) The storage class of the storage bucket. Applies to logging target type: storage. | `string` | `"STANDARD"` | no | +| subscriber\_id | (Optional) The ID to give the pubsub pull subscriber service account. Applies to logging target type: pubsub. | `string` | `""` | no | +| subscription\_labels | (Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to logging target type: pubsub. | `map(string)` | `{}` | no | +| uniform\_bucket\_level\_access | (Optional) Enables Uniform bucket-level access to a bucket. Applies to logging target type: storage. | `bool` | `true` | no | +| versioning | (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to logging target type: storage. | `bool` | `false` | no | ## Outputs | Name | Description | |------|-------------| -| destination\_uri | n/a | +| destination\_uri | The destination URI for the selected logging target type. | | filter | The filter to be applied when exporting logs. | | log\_sinks\_id | The resource ID of the log sink that was created. | | log\_sinks\_name | The resource name of the log sink that was created. | diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf index b12666d25..3a890aed1 100644 --- a/1-org/modules/centralized-logging/outputs.tf +++ b/1-org/modules/centralized-logging/outputs.tf @@ -15,7 +15,8 @@ */ output "destination_uri" { - value = local.destination_uri + description = "The destination URI for the selected logging target type." + value = local.destination_uri } output "filter" { diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf index 142fb875c..0b1a10c82 100644 --- a/1-org/modules/centralized-logging/variables.tf +++ b/1-org/modules/centralized-logging/variables.tf @@ -25,7 +25,7 @@ variable "resources" { } variable "resource_type" { - description = "Resource type of the resource that will export logs to destination. Must be: project, organization, or folder" + description = "Resource type of the resource that will export logs to destination. Must be: project, organization, or folder." type = string validation { @@ -46,7 +46,7 @@ variable "logging_destination_project_id" { } variable "logging_target_type" { - description = "Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub" + description = "Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub." type = string validation { @@ -97,7 +97,7 @@ variable "exclusions" { } variable "bigquery_options" { - description = "(Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables." + description = "(Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables. Applies to logging target type: bigquery." type = object({ use_partitioned_tables = bool }) @@ -110,19 +110,19 @@ variable "labels" { default = {} } variable "kms_key_name" { - description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to destinations: bigquery, storage, and pubsub." + description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. 
Applies to logging target types: bigquery, storage, and pubsub." type = string default = null } variable "logging_location" { - description = "(Optional) The location of the logging destination. Applies to destinations: bigquery and storage." + description = "(Optional) The location of the logging destination. Applies to logging target types: bigquery and storage." type = string default = "US" } variable "delete_contents_on_destroy" { - description = "(Optional) If set to true, delete all contained objects in the logging destination. Applies to destinations: bigquery and storage." + description = "(Optional) If set to true, delete all contained objects in the logging destination. Applies to logging target types: bigquery and storage." type = bool default = false } @@ -131,7 +131,7 @@ variable "delete_contents_on_destroy" { # Logbucket specific variables # #----------------------------- # variable "retention_days" { - description = "(Optional) The number of days data should be retained for the log bucket. Applies to destination: logbucket." + description = "(Optional) The number of days data should be retained for the log bucket. Applies to logging target type: logbucket." type = number default = 30 } @@ -140,13 +140,13 @@ variable "retention_days" { # Big Query specific variables # #----------------------------- # variable "dataset_description" { - description = "(Optional) A user-friendly description of the dataset. Applies to destination: bigquery." + description = "(Optional) A user-friendly description of the dataset. Applies to logging target type: bigquery." type = string default = "" } variable "expiration_days" { - description = "(Optional) Table expiration time. If null logs will never be deleted. Applies to destination: bigquery." + description = "(Optional) Table expiration time. If null logs will never be deleted. Applies to logging target type: bigquery." type = number default = null } @@ -155,13 +155,13 @@ variable "expiration_days" { # Storage specific variables # #--------------------------- # variable "storage_class" { - description = "(Optional) The storage class of the storage bucket. Applies to destination: storage." + description = "(Optional) The storage class of the storage bucket. Applies to logging target type: storage." type = string default = "STANDARD" } variable "uniform_bucket_level_access" { - description = "(Optional) Enables Uniform bucket-level access to a bucket. Applies to destination: storage." + description = "(Optional) Enables Uniform bucket-level access to a bucket. Applies to logging target type: storage." type = bool default = true } @@ -182,12 +182,12 @@ variable "lifecycle_rules" { # - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true. condition = map(string) })) - description = "(Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches_storage_class should be a comma delimited string. Applies to destination: storage." + description = "(Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches_storage_class should be a comma delimited string. Applies to logging target type: storage." 
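  # Illustrative value (assumption, not part of the module itself): a single rule that
  # deletes objects older than 365 days, expressed with the action/condition maps above:
  # [{ action = { type = "Delete" }, condition = { age = "365", with_state = "ANY" } }]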
default = [] } variable "retention_policy" { - description = "(Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to destination: storage." + description = "(Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to logging target type: storage." type = object({ is_locked = bool retention_period_days = number @@ -196,7 +196,7 @@ variable "retention_policy" { } variable "versioning" { - description = "(Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to destination: storage." + description = "(Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to logging target type: storage." type = bool default = false } @@ -205,31 +205,31 @@ variable "versioning" { # Pubsub specific variables # #-------------------------- # variable "create_subscriber" { - description = "(Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub." + description = "(Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub." type = bool default = false } variable "subscriber_id" { - description = "(Optional) The ID to give the pubsub pull subscriber service account. Applies to destination: pubsub." + description = "(Optional) The ID to give the pubsub pull subscriber service account. Applies to logging target type: pubsub." type = string default = "" } variable "subscription_labels" { - description = "(Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to destination: pubsub." + description = "(Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to logging target type: pubsub." type = map(string) default = {} } variable "create_push_subscriber" { - description = "(Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to destination: pubsub." + description = "(Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub." type = bool default = false } variable "push_endpoint" { - description = "(Optional) The URL locating the endpoint to which messages should be pushed. Applies to destination: pubsub." + description = "(Optional) The URL locating the endpoint to which messages should be pushed. Applies to logging target type: pubsub." 
type = string default = "" } From a71059ca3239ae28c53cf11271fdf3bfecaa5046 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 15 Aug 2022 10:29:17 -0300 Subject: [PATCH 11/27] Project id removed from Log Bucket name because it is not global unique as storage names --- 1-org/envs/shared/log_sinks.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index d263af7c3..fa7e45d23 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -110,7 +110,7 @@ module "logbucket_destination" { include_children = true logging_target_type = "logbucket" logging_destination_project_id = module.org_audit_logs.project_id - logging_target_name = "logbkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" + logging_target_name = "logbkt-org-logs-${random_string.suffix.result}" logging_location = var.default_region } From e40ec8b3f3523c2288d24f55b418fbde0501e45a Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 15 Aug 2022 10:29:52 -0300 Subject: [PATCH 12/27] Added information about Log bucket free cost --- 1-org/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/README.md b/1-org/README.md index c306d6070..60892b73c 100644 --- a/1-org/README.md +++ b/1-org/README.md @@ -80,7 +80,7 @@ For details on costs you might incur, go to [Pricing](https://cloud.google.com/s You can choose not to enable the Data Access logs by setting variable `data_access_logs_enabled` to false. **Note:** This module creates a sink to export all logs to Google Storage and Log Bucket. It also creates sinks to export a subset of security related logs -to Bigquery and Pub/Sub. This will result in additional charges for those copies of logs. +to Bigquery and Pub/Sub. This will result in additional charges for those copies of logs. For Log Bucket destination, logs retained for the default retention period (30 days) [don't incur a storage cost](https://cloud.google.com/stackdriver/pricing#:~:text=Logs%20retained%20for%20the%20default%20retention%20period%20don%27t%20incur%20a%20storage%20cost.). You can change the filters & sinks by modifying the configuration in `envs/shared/log_sinks.tf`. **Note:** Currently, this module does not enable [bucket policy retention](https://cloud.google.com/storage/docs/bucket-lock) for organization logs, please, enable it if needed. From ae009b6618e72bbd8b9262061a5b6afeef0aaa36 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio <108549791+felipecrescencio-cit@users.noreply.github.com> Date: Mon, 15 Aug 2022 15:50:19 -0300 Subject: [PATCH 13/27] Added link with additional information Co-authored-by: Daniel Andrade --- 1-org/envs/shared/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/envs/shared/outputs.tf b/1-org/envs/shared/outputs.tf index 2ede4bb40..5b800a233 100644 --- a/1-org/envs/shared/outputs.tf +++ b/1-org/envs/shared/outputs.tf @@ -96,5 +96,5 @@ output "logs_export_storage_bucket_name" { output "logs_export_logbucket_name" { value = module.logbucket_destination.resource_name - description = "The log bucket for destination of log exports" + description = "The log bucket for destination of log exports. 
See https://cloud.google.com/logging/docs/routing/overview#buckets" } From ab5e0ae9d007315bfdbaa0dccd6c7877e926c13b Mon Sep 17 00:00:00 2001 From: Felipe Crescencio <108549791+felipecrescencio-cit@users.noreply.github.com> Date: Mon, 15 Aug 2022 15:51:05 -0300 Subject: [PATCH 14/27] Added links with additional information about sink destinations Co-authored-by: Daniel Andrade --- 1-org/modules/centralized-logging/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index 87e3e0960..a480ff6a2 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -1,6 +1,6 @@ # Centralized Logging Module -This module handles logging configuration enabling one or more resources such as organization, folders, or projects to send logs to a destination: GCS bucket, Big Query, Pub/Sub, or Log Bucket. +This module handles logging configuration enabling one or more resources such as organization, folders, or projects to send logs to a destination: [GCS bucket](https://cloud.google.com/logging/docs/export/using_exported_logs#gcs-overview), [Big Query](https://cloud.google.com/logging/docs/export/bigquery), [Pub/Sub](https://cloud.google.com/logging/docs/export/using_exported_logs#pubsub-overview), or [Log Buckets](https://cloud.google.com/logging/docs/routing/overview#buckets). ## Usage From c746c82ac57e37b18be6d27a40681a6e4abc2501 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio <108549791+felipecrescencio-cit@users.noreply.github.com> Date: Mon, 15 Aug 2022 15:54:03 -0300 Subject: [PATCH 15/27] Improve to clarify documentation Co-authored-by: Daniel Andrade --- 1-org/modules/centralized-logging/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index a480ff6a2..2eabbde8e 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -35,7 +35,9 @@ EOF } ``` -Heads up when the destination is a Log Bucket and the logging destination project is also a resource. If it is the case, do not forget to set `logging_project_key` variable with the logging destination project key from map resources. Get more details at [Configure and manage sinks](https://cloud.google.com/logging/docs/export/configure_export_v2#dest-auth:~:text=If%20you%27re%20using%20a%20sink%20to%20route%20logs%20between%20Logging%20buckets%20in%20the%20same%20Cloud%20project%2C%20no%20new%20service%20account%20is%20created%3B%20the%20sink%20works%20without%20the%20unique%20writer%20identity.). +**Note:** when the destination is a Log Bucket and a sink is been created in the same project, set variable + `logging_project_key` with the **key** used to map the Log Bucket project in the `resources` map. +Get more details at [Configure and manage sinks](https://cloud.google.com/logging/docs/export/configure_export_v2#dest-auth:~:text=If%20you%27re%20using%20a%20sink%20to%20route%20logs%20between%20Logging%20buckets%20in%20the%20same%20Cloud%20project%2C%20no%20new%20service%20account%20is%20created%3B%20the%20sink%20works%20without%20the%20unique%20writer%20identity.). The following example exports all logs from three projects - including the logging destination project - to a Log Bucket destination. 
As it exports all logs be aware of additional charges for this amount of logs: From 0d931050f1fc4d776ffda62eaf37b4917e520715 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 15 Aug 2022 17:09:16 -0300 Subject: [PATCH 16/27] Added link with additional info --- 1-org/envs/shared/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/envs/shared/README.md b/1-org/envs/shared/README.md index 9154893b8..ce0194149 100644 --- a/1-org/envs/shared/README.md +++ b/1-org/envs/shared/README.md @@ -70,7 +70,7 @@ | dns\_hub\_project\_id | The DNS hub project ID | | domains\_to\_allow | The list of domains to allow users from in IAM. | | interconnect\_project\_id | The Dedicated Interconnect project ID | -| logs\_export\_logbucket\_name | The log bucket for destination of log exports | +| logs\_export\_logbucket\_name | The log bucket for destination of log exports. See https://cloud.google.com/logging/docs/routing/overview#buckets | | logs\_export\_pubsub\_topic | The Pub/Sub topic for destination of log exports | | logs\_export\_storage\_bucket\_name | The storage bucket for destination of log exports | | org\_audit\_logs\_project\_id | The org audit logs project ID | From 2ca9600e13af121926f2971c93aeaa489073b0ea Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 15 Aug 2022 17:16:56 -0300 Subject: [PATCH 17/27] Clean unused locals --- 1-org/modules/centralized-logging/main.tf | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index 0f8c91db1..8b02f2e8f 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -21,17 +21,11 @@ locals { destination_uri = length(var.logging_destination_uri) > 0 ? var.logging_destination_uri : var.logging_target_type == "bigquery" ? module.destination_bigquery[0].destination_uri : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].destination_uri : var.logging_target_type == "storage" ? module.destination_storage[0].destination_uri : module.destination_logbucket[0].destination_uri create_destination = !(length(var.logging_destination_uri) > 0) logging_sink_name = length(var.logging_sink_name) > 0 ? var.logging_sink_name : "sk-to-${local.logging_target_name_prefix}-${var.logging_destination_project_id}" - logging_target_name_prefix = var.logging_target_type == "bigquery" ? "ds" : var.logging_target_type == "pubsub" ? "topic" : var.logging_target_type == "storage" ? "bkt" : "logbkt" - logging_target_name = length(var.logging_target_name) > 0 ? var.logging_target_name : "${local.logging_target_name_prefix}-${random_string.suffix.result}" - log_exports = setunion(local.log_exports_others) - parent_resource_ids = [for parent_resource_id in local.log_exports[*].parent_resource_id : parent_resource_id] + logging_target_name_prefix = var.logging_target_type == "bigquery" ? "ds_logs_" : var.logging_target_type == "pubsub" ? "topic-logs-" : var.logging_target_type == "storage" ? "bkt-logs-" : "logbkt-logs-" + logging_target_name = length(var.logging_target_name) > 0 ? var.logging_target_name : "${local.logging_target_name_prefix}${random_string.suffix.result}" - # Bigquery sink options bigquery_options = var.logging_target_type == "bigquery" && var.bigquery_options != null ? 
var.bigquery_options : null - - log_exports_others = toset([ - for value in module.log_export : value - ]) + # Bigquery sink options - Enabling option use_partitioned_tables will store logs into a single table that is internally partitioned by day which can improve query performance. } resource "random_string" "suffix" { From 21407e6db5c2eb1d3b6c54cf02cb2c81121dd4b5 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 15 Aug 2022 17:18:03 -0300 Subject: [PATCH 18/27] Fix example codes --- 1-org/modules/centralized-logging/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index 2eabbde8e..fa2f4c149 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -31,12 +31,11 @@ EOF logging_destination_project_id = "" logging_target_name = "bkt-audit-logs" uniform_bucket_level_access = true - logging_location = "US" + logging_location = "us-central1" } ``` -**Note:** when the destination is a Log Bucket and a sink is been created in the same project, set variable - `logging_project_key` with the **key** used to map the Log Bucket project in the `resources` map. +**Note:** When the destination is a Log Bucket and a sink is been created in the same project, set variable `logging_project_key` with the **key** used to map the Log Bucket project in the `resources` map. Get more details at [Configure and manage sinks](https://cloud.google.com/logging/docs/export/configure_export_v2#dest-auth:~:text=If%20you%27re%20using%20a%20sink%20to%20route%20logs%20between%20Logging%20buckets%20in%20the%20same%20Cloud%20project%2C%20no%20new%20service%20account%20is%20created%3B%20the%20sink%20works%20without%20the%20unique%20writer%20identity.). The following example exports all logs from three projects - including the logging destination project - to a Log Bucket destination. As it exports all logs be aware of additional charges for this amount of logs: @@ -58,7 +57,7 @@ module "logging_logbucket" { logging_destination_project_id = "" logging_target_name = "logbkt-logs" uniform_bucket_level_access = true - logging_location = "US" + logging_location = "us-central1" logging_project_key = "prj1" } ``` From 7de8aca0b54129a02f34033681ee084230695799 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Tue, 16 Aug 2022 10:19:25 -0300 Subject: [PATCH 19/27] -Improve auto-generated names for sinks and target -Improve code readability using maps and lookup --- 1-org/modules/centralized-logging/main.tf | 41 ++++++++++++++++++----- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index 8b02f2e8f..e1e353cda 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -15,21 +15,44 @@ */ locals { - key_first_resource = keys(var.resources)[0] - logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key } - resource_name = var.logging_target_type == "bigquery" ? module.destination_bigquery[0].resource_name : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].resource_name : var.logging_target_type == "storage" ? module.destination_storage[0].resource_name : module.destination_logbucket[0].resource_name - destination_uri = length(var.logging_destination_uri) > 0 ? var.logging_destination_uri : var.logging_target_type == "bigquery" ? 
module.destination_bigquery[0].destination_uri : var.logging_target_type == "pubsub" ? module.destination_pubsub[0].destination_uri : var.logging_target_type == "storage" ? module.destination_storage[0].destination_uri : module.destination_logbucket[0].destination_uri - create_destination = !(length(var.logging_destination_uri) > 0) - logging_sink_name = length(var.logging_sink_name) > 0 ? var.logging_sink_name : "sk-to-${local.logging_target_name_prefix}-${var.logging_destination_project_id}" - logging_target_name_prefix = var.logging_target_type == "bigquery" ? "ds_logs_" : var.logging_target_type == "pubsub" ? "topic-logs-" : var.logging_target_type == "storage" ? "bkt-logs-" : "logbkt-logs-" - logging_target_name = length(var.logging_target_name) > 0 ? var.logging_target_name : "${local.logging_target_name_prefix}${random_string.suffix.result}" + key_first_resource = keys(var.resources)[0] + logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key } + create_destination = !(length(var.logging_destination_uri) > 0) + logging_sink_name = length(var.logging_sink_name) > 0 ? var.logging_sink_name : "sk-to-${lookup(local.logging_sink_name_map, var.logging_target_type, "log_dest_")}" + logging_sink_name_map = { + bigquery = try("ds-logs-${var.logging_destination_project_id}", "ds-logs") + pubsub = try("tp-logs-${var.logging_destination_project_id}", "tp-logs") + storage = try("bkt-logs-${var.logging_destination_project_id}", "bkt-logs") + logbucket = try("logbkt-logs-${var.logging_destination_project_id}", "logbkt-logs") + } + resource_name = lookup(local.resource_name_map, var.logging_target_type, "") + resource_name_map = { + bigquery = try(module.destination_bigquery[0].resource_name, "") + pubsub = try(module.destination_pubsub[0].resource_name, "") + storage = try(module.destination_storage[0].resource_name, "") + logbucket = try(module.destination_logbucket[0].resource_name, "") + } + destination_uri = length(var.logging_destination_uri) > 0 ? var.logging_destination_uri : lookup(local.destination_uri_map, var.logging_target_type, "") + destination_uri_map = { + bigquery = try(module.destination_bigquery[0].destination_uri, "") + pubsub = try(module.destination_pubsub[0].destination_uri, "") + storage = try(module.destination_storage[0].destination_uri, "") + logbucket = try(module.destination_logbucket[0].destination_uri, "") + } + logging_target_name = length(var.logging_target_name) > 0 ? var.logging_target_name : "${lookup(local.logging_target_name_prefix, var.logging_target_type, "log_dest_")}${random_string.suffix.result}" + logging_target_name_prefix = { + bigquery = "ds_logs_" + pubsub = "tp-logs-" + storage = try("bkt-logs-${var.logging_destination_project_id}-", "bkt-logs-") + logbucket = "logbkt-logs-" + } bigquery_options = var.logging_target_type == "bigquery" && var.bigquery_options != null ? var.bigquery_options : null # Bigquery sink options - Enabling option use_partitioned_tables will store logs into a single table that is internally partitioned by day which can improve query performance. 
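  # Illustrative resolution of the lookups above (assumed inputs): with logging_target_type = "storage",
  # logging_destination_project_id = "prj-logging", and no explicit names provided, the defaults become
  # logging_sink_name = "sk-to-bkt-logs-prj-logging" and logging_target_name = "bkt-logs-prj-logging-<suffix>".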
} resource "random_string" "suffix" { - length = 8 + length = 4 upper = false special = false } From 96640b7b0f52847a557a472456ad34de8f27ab46 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio <108549791+felipecrescencio-cit@users.noreply.github.com> Date: Tue, 23 Aug 2022 09:10:31 -0300 Subject: [PATCH 20/27] Fix var description Co-authored-by: Bharath KKB --- 1-org/modules/centralized-logging/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf index 0b1a10c82..10479275b 100644 --- a/1-org/modules/centralized-logging/variables.tf +++ b/1-org/modules/centralized-logging/variables.tf @@ -62,7 +62,7 @@ variable "logging_target_name" { } variable "logging_destination_uri" { - description = "The self_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources." + description = "The self_link URI of the destination resource. If provided all needed permissions will be assigned and this resource will be used as log destination for all resources." type = string default = "" } From bc26bf3db8311cb44664199c899819e8f6058cfc Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 29 Aug 2022 19:15:48 -0300 Subject: [PATCH 21/27] Refactor all destinations in one module call --- 1-org/envs/shared/log_sinks.tf | 106 +++---- 1-org/envs/shared/outputs.tf | 6 +- 1-org/modules/centralized-logging/README.md | 39 +-- 1-org/modules/centralized-logging/main.tf | 155 +++++---- 1-org/modules/centralized-logging/outputs.tf | 40 +-- .../modules/centralized-logging/variables.tf | 300 ++++++++---------- 6 files changed, 279 insertions(+), 367 deletions(-) diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index fa7e45d23..81fd4d246 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -35,85 +35,63 @@ resource "random_string" "suffix" { special = false } -/****************************************** - Send logs to BigQuery -*****************************************/ - -module "bigquery_destination" { +module "logs_export" { source = "../../modules/centralized-logging" - resources = local.parent_resources - resource_type = local.parent_resource_type - logging_sink_filter = local.main_logs_filter - logging_sink_name = "sk-c-logging-bq" - logging_target_type = "bigquery" - include_children = true + resources = local.parent_resources + resource_type = local.parent_resource_type + logging_destination_project_id = module.org_audit_logs.project_id + + /****************************************** + Send logs to BigQuery +*****************************************/ bigquery_options = { - use_partitioned_tables = true + logging_sink_name = "sk-c-logging-bq" + logging_sink_filter = local.main_logs_filter + dataset_name = "audit_logs" + partitioned_tables = "true" + include_children = true + expiration_days = var.audit_logs_table_expiration_days + delete_contents_on_destroy = var.audit_logs_table_delete_contents_on_destroy } - logging_destination_project_id = module.org_audit_logs.project_id - logging_target_name = "audit_logs" - expiration_days = var.audit_logs_table_expiration_days - delete_contents_on_destroy = var.audit_logs_table_delete_contents_on_destroy -} -/****************************************** + /****************************************** Send logs to Storage *****************************************/ + storage_options = { + logging_sink_filter 
= local.all_logs_filter + logging_sink_name = "sk-c-logging-bkt" + include_children = true + storage_bucket_name = "bkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" + location = var.log_export_storage_location + retention_policy = var.log_export_storage_retention_policy + force_destroy = var.log_export_storage_force_destroy + versioning = var.log_export_storage_versioning + } -module "storage_destination" { - source = "../../modules/centralized-logging" - - logging_sink_filter = local.all_logs_filter - logging_sink_name = "sk-c-logging-bkt" - resources = local.parent_resources - resource_type = local.parent_resource_type - include_children = true - logging_target_type = "storage" - logging_destination_project_id = module.org_audit_logs.project_id - logging_target_name = "bkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" - uniform_bucket_level_access = true - logging_location = var.log_export_storage_location - retention_policy = var.log_export_storage_retention_policy - delete_contents_on_destroy = var.log_export_storage_force_destroy - versioning = var.log_export_storage_versioning -} - -/****************************************** + /****************************************** Send logs to Pub\Sub *****************************************/ + pubsub_options = { + logging_sink_filter = local.main_logs_filter + logging_sink_name = "sk-c-logging-pub" + include_children = true + topic_name = "tp-org-logs-${random_string.suffix.result}" + create_subscriber = true + } -module "pubsub_destination" { - source = "../../modules/centralized-logging" - - logging_sink_filter = local.main_logs_filter - logging_sink_name = "sk-c-logging-pub" - resources = local.parent_resources - resource_type = local.parent_resource_type - include_children = true - logging_target_type = "pubsub" - logging_destination_project_id = module.org_audit_logs.project_id - logging_target_name = "tp-org-logs-${random_string.suffix.result}" - create_subscriber = true -} - -/****************************************** + /****************************************** Send logs to Logbucket *****************************************/ -module "logbucket_destination" { - source = "../../modules/centralized-logging" - - logging_sink_filter = local.all_logs_filter - logging_sink_name = "sk-c-logging-logbkt" - resources = local.parent_resources - resource_type = local.parent_resource_type - include_children = true - logging_target_type = "logbucket" - logging_destination_project_id = module.org_audit_logs.project_id - logging_target_name = "logbkt-org-logs-${random_string.suffix.result}" - logging_location = var.default_region + logbucket_options = { + logging_sink_name = "sk-c-logging-logbkt" + logging_sink_filter = local.all_logs_filter + include_children = true + name = "logbkt-org-logs-${random_string.suffix.result}" + } } + /****************************************** Billing logs (Export configured manually) *****************************************/ diff --git a/1-org/envs/shared/outputs.tf b/1-org/envs/shared/outputs.tf index 5b800a233..8c0cb6da4 100644 --- a/1-org/envs/shared/outputs.tf +++ b/1-org/envs/shared/outputs.tf @@ -85,16 +85,16 @@ output "domains_to_allow" { } output "logs_export_pubsub_topic" { - value = module.pubsub_destination.resource_name + value = module.logs_export.resource_name_pubsub description = "The Pub/Sub topic for destination of log exports" } output "logs_export_storage_bucket_name" { - value = module.storage_destination.resource_name + value = 
module.logs_export.resource_name_storage description = "The storage bucket for destination of log exports" } output "logs_export_logbucket_name" { - value = module.logbucket_destination.resource_name + value = module.logs_export.resource_name_logbucket description = "The log bucket for destination of log exports. See https://cloud.google.com/logging/docs/routing/overview#buckets" } diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index fa2f4c149..1ead6fa75 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -67,45 +67,22 @@ module "logging_logbucket" { | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| bigquery\_options | (Optional) Options that affect sinks exporting data to BigQuery. use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables. Applies to logging target type: bigquery. |
object({
use_partitioned_tables = bool
})
| `null` | no | -| create\_push\_subscriber | (Optional) Whether to add a push configuration to the subcription. If 'true', a push subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub. | `bool` | `false` | no | -| create\_subscriber | (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic. Applies to logging target type: pubsub. | `bool` | `false` | no | -| dataset\_description | (Optional) A user-friendly description of the dataset. Applies to logging target type: bigquery. | `string` | `""` | no | -| delete\_contents\_on\_destroy | (Optional) If set to true, delete all contained objects in the logging destination. Applies to logging target types: bigquery and storage. | `bool` | `false` | no | -| exclusions | (Optional) A list of sink exclusion filters. |
list(object({
name = string,
description = string,
filter = string,
disabled = bool
}))
| `[]` | no | -| expiration\_days | (Optional) Table expiration time. If null logs will never be deleted. Applies to logging target type: bigquery. | `number` | `null` | no | -| include\_children | Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included. | `bool` | `false` | no | -| kms\_key\_name | (Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to logging target types: bigquery, storage, and pubsub. | `string` | `null` | no | -| labels | (Optional) Labels attached to logging resources. | `map(string)` | `{}` | no | -| lifecycle\_rules | (Optional) List of lifecycle rules to configure. Format is the same as described in provider documentation https://www.terraform.io/docs/providers/google/r/storage_bucket.html#lifecycle_rule except condition.matches\_storage\_class should be a comma delimited string. Applies to logging target type: storage. |
set(object({
# Object with keys:
# - type - The type of the action of this Lifecycle Rule. Supported values: Delete and SetStorageClass.
# - storage_class - (Required if action type is SetStorageClass) The target Storage Class of objects affected by this Lifecycle Rule.
action = map(string)

# Object with keys:
# - age - (Optional) Minimum age of an object in days to satisfy this condition.
# - created_before - (Optional) Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.
# - with_state - (Optional) Match to live and/or archived objects. Supported values include: "LIVE", "ARCHIVED", "ANY".
# - matches_storage_class - (Optional) Comma delimited string for storage class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, DURABLE_REDUCED_AVAILABILITY.
# - num_newer_versions - (Optional) Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.
# - days_since_custom_time - (Optional) The number of days from the Custom-Time metadata attribute after which this condition becomes true.
condition = map(string)
}))
| `[]` | no | +| bigquery\_options | Destination BigQuery options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- dataset\_name: The name of the bigquery dataset to be created and used for log entries.
- expiration\_days: (Optional) Table expiration time. If null logs will never be deleted.
- partitioned\_tables: (Optional) Whether to set use\_partitioned\_tables in the sink's BigQuery options. When enabled, logs are stored in a single table that is internally partitioned by day, which can improve query performance.
- delete\_contents\_on\_destroy: (Optional) If set to true, delete all contained objects in the logging destination.

Destination BigQuery options example:
bigquery_options = {
logging_sink_name = "sk-c-logging-bq"
dataset_name = "audit_logs"
partitioned_tables = "true"
include_children = "true"
expiration_days = 30
delete_contents_on_destroy = false
logging_sink_filter = <<EOF
logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(string)` | `null` | no | +| logbucket\_options | Destination LogBucket options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- name: The name of the log bucket to be created and used for log entries matching the filter.
- location: The location of the log bucket. Default: global.
- retention\_days: (Optional) The number of days data should be retained for the log bucket. Default 30.

Destination LogBucket options example:
logbucket_options = {
logging_sink_name = "sk-c-logging-logbkt"
logging_sink_filter = ""
name = "logbkt-org-logs"
retention_days = "30"
include_children = "true"
location = "global"
}
| `map(any)` | `null` | no | | logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | -| logging\_destination\_uri | The self\_link URI of the destination resource. If provided all needed permitions will be assinged and this resource will be used as log destination for all resources. | `string` | `""` | no | -| logging\_location | (Optional) The location of the logging destination. Applies to logging target types: bigquery and storage. | `string` | `"US"` | no | | logging\_project\_key | (Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource\_type = project and logging\_target\_type = logbucket. | `string` | `""` | no | -| logging\_sink\_filter | The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs. | `string` | `""` | no | -| logging\_sink\_name | The name of the log sink to be created. | `string` | `""` | no | -| logging\_target\_name | The name of the logging container (logbucket, bigquery-dataset, storage, or pubsub-topic) that will store the logs. | `string` | `""` | no | -| logging\_target\_type | Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub. | `string` | n/a | yes | -| push\_endpoint | (Optional) The URL locating the endpoint to which messages should be pushed. Applies to logging target type: pubsub. | `string` | `""` | no | +| pubsub\_options | Destination Pubsub options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- topic\_name: The name of the pubsub topic to be created and used for log entries matching the filter.
- create\_subscriber: (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic.

Destination Pubsub options example:
pubsub_options = {
logging_sink_name = "sk-c-logging-pub"
include_children = true
topic_name = "tp-org-logs"
create_subscriber = true
logging_sink_filter = <<EOF
logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(any)` | `null` | no | | resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization, or folder. | `string` | n/a | yes | | resources | Export logs from the specified resources. | `map(string)` | n/a | yes | -| retention\_days | (Optional) The number of days data should be retained for the log bucket. Applies to logging target type: logbucket. | `number` | `30` | no | -| retention\_policy | (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained. Applies to logging target type: storage. |
object({
is_locked = bool
retention_period_days = number
})
| `null` | no | -| storage\_class | (Optional) The storage class of the storage bucket. Applies to logging target type: storage. | `string` | `"STANDARD"` | no | -| subscriber\_id | (Optional) The ID to give the pubsub pull subscriber service account. Applies to logging target type: pubsub. | `string` | `""` | no | -| subscription\_labels | (Optional) A set of key/value label pairs to assign to the pubsub subscription. Applies to logging target type: pubsub. | `map(string)` | `{}` | no | -| uniform\_bucket\_level\_access | (Optional) Enables Uniform bucket-level access to a bucket. Applies to logging target type: storage. | `bool` | `true` | no | -| versioning | (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted. Applies to logging target type: storage. | `bool` | `false` | no | +| storage\_options | Destination Storage options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- storage\_bucket\_name: The name of the storage bucket to be created and used for log entries matching the filter.
- location: (Optional) The location of the logging destination. Default: US.
- Retention Policy variables: (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.
- retention\_policy\_is\_locked: Whether the bucket's retention policy is locked. Default: false.
- retention\_policy\_period\_days: Set the period of days for log retention. Default: 30.
- versioning: (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted.
- force\_destroy: When deleting a bucket, this boolean option will delete all contained objects.

Destination Storage options example:
storage_options = {
logging_sink_name = "sk-c-logging-bkt"
logging_sink_filter = ""
include_children = "true"
storage_bucket_name = "bkt-org-logs"
location = "US"
force_destroy = false
versioning = false
}
| `map(any)` | `null` | no | ## Outputs | Name | Description | |------|-------------| -| destination\_uri | The destination URI for the selected logging target type. | -| filter | The filter to be applied when exporting logs. | -| log\_sinks\_id | The resource ID of the log sink that was created. | -| log\_sinks\_name | The resource name of the log sink that was created. | -| parent\_resource\_ids | The ID of the GCP resource in which you create the log sink. | -| resource\_name | The resource name for the destination | +| resource\_name\_bigquery | The resource name for the destination BigQuery. | +| resource\_name\_logbucket | The resource name for the destination Log Bucket. | +| resource\_name\_pubsub | The resource name for the destination Pub/Sub. | +| resource\_name\_storage | The resource name for the destination Storage. | diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index e1e353cda..fbbcf553e 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -15,40 +15,49 @@ */ locals { - key_first_resource = keys(var.resources)[0] + value_first_resource = values(var.resources)[0] logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key } - create_destination = !(length(var.logging_destination_uri) > 0) - logging_sink_name = length(var.logging_sink_name) > 0 ? var.logging_sink_name : "sk-to-${lookup(local.logging_sink_name_map, var.logging_target_type, "log_dest_")}" + + all_sinks = merge( + var.bigquery_options == null ? {} : { + for k, v in var.resources : "${v}_bgq" => var.bigquery_options + }, + var.logbucket_options == null ? {} : { + for k, v in var.resources : "${v}_lbk" => var.logbucket_options + }, + var.pubsub_options == null ? {} : { + for k, v in var.resources : "${v}_pub" => var.pubsub_options + }, + var.storage_options == null ? {} : { + for k, v in var.resources : "${v}_sto" => var.storage_options + }, + ) + logging_sink_name_map = { - bigquery = try("ds-logs-${var.logging_destination_project_id}", "ds-logs") - pubsub = try("tp-logs-${var.logging_destination_project_id}", "tp-logs") - storage = try("bkt-logs-${var.logging_destination_project_id}", "bkt-logs") - logbucket = try("logbkt-logs-${var.logging_destination_project_id}", "logbkt-logs") + bgq = try("ds-logs-${var.logging_destination_project_id}", "ds-logs") + pub = try("tp-logs-${var.logging_destination_project_id}", "tp-logs") + sto = try("bkt-logs-${var.logging_destination_project_id}", "bkt-logs") + lbk = try("logbkt-logs-${var.logging_destination_project_id}", "logbkt-logs") } - resource_name = lookup(local.resource_name_map, var.logging_target_type, "") - resource_name_map = { - bigquery = try(module.destination_bigquery[0].resource_name, "") - pubsub = try(module.destination_pubsub[0].resource_name, "") - storage = try(module.destination_storage[0].resource_name, "") - logbucket = try(module.destination_logbucket[0].resource_name, "") - } - destination_uri = length(var.logging_destination_uri) > 0 ? 
var.logging_destination_uri : lookup(local.destination_uri_map, var.logging_target_type, "") destination_uri_map = { - bigquery = try(module.destination_bigquery[0].destination_uri, "") - pubsub = try(module.destination_pubsub[0].destination_uri, "") - storage = try(module.destination_storage[0].destination_uri, "") - logbucket = try(module.destination_logbucket[0].destination_uri, "") + bgq = try(module.destination_bigquery[0].destination_uri, "") + pub = try(module.destination_pubsub[0].destination_uri, "") + sto = try(module.destination_storage[0].destination_uri, "") + lbk = try(module.destination_logbucket[0].destination_uri, "") } - logging_target_name = length(var.logging_target_name) > 0 ? var.logging_target_name : "${lookup(local.logging_target_name_prefix, var.logging_target_type, "log_dest_")}${random_string.suffix.result}" logging_target_name_prefix = { - bigquery = "ds_logs_" - pubsub = "tp-logs-" - storage = try("bkt-logs-${var.logging_destination_project_id}-", "bkt-logs-") - logbucket = "logbkt-logs-" + bgq = "ds_logs_" + pub = "tp-logs-" + sto = try("bkt-logs-${var.logging_destination_project_id}-", "bkt-logs-") + lbk = "logbkt-logs-" + } + + part_tables = { + false = { use_partitioned_tables = false } + true = { use_partitioned_tables = true } } - bigquery_options = var.logging_target_type == "bigquery" && var.bigquery_options != null ? var.bigquery_options : null - # Bigquery sink options - Enabling option use_partitioned_tables will store logs into a single table that is internally partitioned by day which can improve query performance. + bgq_options_part_tables = var.bigquery_options == null ? local.part_tables.false : !contains(keys(var.bigquery_options), "partitioned_tables") ? local.part_tables.false : var.bigquery_options.partitioned_tables == "true" ? local.part_tables.true : local.part_tables.false } resource "random_string" "suffix" { @@ -61,20 +70,18 @@ module "log_export" { source = "terraform-google-modules/log-export/google" version = "~> 7.3.0" - for_each = var.resources + for_each = local.all_sinks - destination_uri = local.destination_uri - filter = var.logging_sink_filter - log_sink_name = local.logging_sink_name - parent_resource_id = each.value + destination_uri = lookup(each.value, "destination_uri", lookup(local.destination_uri_map, substr(each.key, -3, -1), "")) + filter = lookup(each.value, "logging_sink_filter", "") + log_sink_name = lookup(each.value, "logging_sink_name", "sk-to-${lookup(local.logging_sink_name_map, substr(each.key, -3, -1), "log_dest_")}") + parent_resource_id = substr(each.key, 0, length(each.key) - 4) parent_resource_type = var.resource_type unique_writer_identity = true - include_children = var.include_children - bigquery_options = local.bigquery_options - exclusions = var.exclusions + include_children = tobool(lookup(each.value, "include_children", "false")) + bigquery_options = substr(each.key, -3, -1) == "bgq" ? local.bgq_options_part_tables : null } - #-------------------------# # Send logs to Log Bucket # #-------------------------# @@ -84,13 +91,13 @@ module "destination_logbucket" { source = "github.com/terraform-google-modules/terraform-google-log-export//modules/logbucket" - count = local.create_destination && var.logging_target_type == "logbucket" ? 1 : 0 + count = var.logbucket_options != null ? 
1 : 0 project_id = var.logging_destination_project_id - name = local.logging_target_name - log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity - location = var.logging_location - retention_days = var.retention_days + name = lookup(var.logbucket_options, "name", "${lookup(local.logging_target_name_prefix, "lbk", "log_dest_")}${random_string.suffix.result}") + log_sink_writer_identity = module.log_export["${local.value_first_resource}_lbk"].writer_identity + location = lookup(var.logbucket_options, "location", "global") + retention_days = lookup(var.logbucket_options, "retention_days", 30) grant_write_permission_on_bkt = false } @@ -98,13 +105,14 @@ module "destination_logbucket" { # Log Bucket Service account IAM membership # #-------------------------------------------# resource "google_project_iam_member" "logbucket_sink_member" { - for_each = var.logging_target_type == "logbucket" ? local.logbucket_sink_member : {} + for_each = var.logbucket_options != null ? local.logbucket_sink_member : {} project = var.logging_destination_project_id role = "roles/logging.bucketWriter" - member = module.log_export[each.key].writer_identity + member = module.log_export["${each.value}_lbk"].writer_identity } + #-----------------------# # Send logs to BigQuery # #-----------------------# @@ -112,27 +120,24 @@ module "destination_bigquery" { source = "terraform-google-modules/log-export/google//modules/bigquery" version = "~> 7.3.0" - count = local.create_destination && var.logging_target_type == "bigquery" ? 1 : 0 + count = var.bigquery_options != null ? 1 : 0 project_id = var.logging_destination_project_id - dataset_name = replace(local.logging_target_name, "-", "_") - log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity - labels = var.labels - description = var.dataset_description - kms_key_name = var.kms_key_name - expiration_days = var.expiration_days - delete_contents_on_destroy = var.delete_contents_on_destroy + dataset_name = replace(lookup(var.bigquery_options, "dataset_name", "${lookup(local.logging_target_name_prefix, "bgq", "log_dest_")}${random_string.suffix.result}"), "-", "_") + log_sink_writer_identity = module.log_export["${local.value_first_resource}_bgq"].writer_identity + expiration_days = lookup(var.bigquery_options, "expiration_days", null) + delete_contents_on_destroy = lookup(var.bigquery_options, "delete_contents_on_destroy", false) } #-----------------------------------------# # Bigquery Service account IAM membership # #-----------------------------------------# resource "google_project_iam_member" "bigquery_sink_member" { - for_each = var.logging_target_type == "bigquery" ? var.resources : {} + for_each = var.bigquery_options != null ? var.resources : {} project = var.logging_destination_project_id role = "roles/bigquery.dataEditor" - member = module.log_export[each.key].writer_identity + member = module.log_export["${each.value}_bgq"].writer_identity } @@ -143,31 +148,31 @@ module "destination_storage" { source = "terraform-google-modules/log-export/google//modules/storage" version = "~> 7.3.0" - count = local.create_destination && var.logging_target_type == "storage" ? 1 : 0 + count = var.storage_options != null ? 
1 : 0 project_id = var.logging_destination_project_id - storage_bucket_name = local.logging_target_name - log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity - kms_key_name = var.kms_key_name - uniform_bucket_level_access = var.uniform_bucket_level_access - location = var.logging_location - storage_bucket_labels = var.labels - force_destroy = var.delete_contents_on_destroy - retention_policy = var.retention_policy - lifecycle_rules = var.lifecycle_rules - storage_class = var.storage_class - versioning = var.versioning + storage_bucket_name = lookup(var.storage_options, "storage_bucket_name", "${lookup(local.logging_target_name_prefix, "sto", "log_dest_")}${random_string.suffix.result}") + log_sink_writer_identity = module.log_export["${local.value_first_resource}_sto"].writer_identity + uniform_bucket_level_access = true + location = lookup(var.storage_options, "location", "US") + force_destroy = lookup(var.storage_options, "force_destroy", "false") + versioning = lookup(var.storage_options, "versioning", "false") + + retention_policy = !contains(keys(var.storage_options), "retention_policy_is_locked") ? null : { + is_locked = tobool(lookup(var.storage_options, "retention_policy_is_locked", "false")) + retention_period_days = tonumber(lookup(var.storage_options, "retention_policy_period_days", "30")) + } } #----------------------------------------# # Storage Service account IAM membership # #----------------------------------------# resource "google_storage_bucket_iam_member" "storage_sink_member" { - for_each = var.logging_target_type == "storage" ? module.log_export : {} + for_each = var.storage_options != null ? var.resources : {} bucket = module.destination_storage[0].resource_name role = "roles/storage.objectCreator" - member = each.value.writer_identity + member = module.log_export["${each.value}_sto"].writer_identity } @@ -178,28 +183,22 @@ module "destination_pubsub" { source = "terraform-google-modules/log-export/google//modules/pubsub" version = "~> 7.3.0" - count = local.create_destination && var.logging_target_type == "pubsub" ? 1 : 0 + count = var.pubsub_options != null ? 1 : 0 project_id = var.logging_destination_project_id - topic_name = local.logging_target_name - log_sink_writer_identity = module.log_export[local.key_first_resource].writer_identity - kms_key_name = var.kms_key_name - topic_labels = var.labels - create_subscriber = var.create_subscriber - subscription_labels = var.subscription_labels - create_push_subscriber = var.create_push_subscriber - push_endpoint = var.push_endpoint - subscriber_id = var.subscriber_id + topic_name = lookup(var.pubsub_options, "topic_name", "${lookup(local.logging_target_name_prefix, "pub", "log_dest_")}${random_string.suffix.result}") + log_sink_writer_identity = module.log_export["${local.value_first_resource}_pub"].writer_identity + create_subscriber = lookup(var.pubsub_options, "create_subscriber", false) } #---------------------------------------# # Pubsub Service account IAM membership # #---------------------------------------# resource "google_pubsub_topic_iam_member" "pubsub_sink_member" { - for_each = var.logging_target_type == "pubsub" ? module.log_export : {} + for_each = var.pubsub_options != null ? 
var.resources : {} project = var.logging_destination_project_id topic = module.destination_pubsub[0].resource_name role = "roles/pubsub.publisher" - member = each.value.writer_identity + member = module.log_export["${each.value}_pub"].writer_identity } diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf index 3a890aed1..4bcfbbe38 100644 --- a/1-org/modules/centralized-logging/outputs.tf +++ b/1-org/modules/centralized-logging/outputs.tf @@ -14,38 +14,22 @@ * limitations under the License. */ -output "destination_uri" { - description = "The destination URI for the selected logging target type." - value = local.destination_uri +output "resource_name_bigquery" { + description = "The resource name for the destination BigQuery." + value = module.destination_bigquery[0].resource_name } -output "filter" { - description = "The filter to be applied when exporting logs." - value = var.logging_sink_filter +output "resource_name_storage" { + description = "The resource name for the destination Storage." + value = module.destination_storage[0].resource_name } -output "log_sinks_id" { - description = "The resource ID of the log sink that was created." - value = toset([ - for value in module.log_export : value.log_sink_resource_id - ]) +output "resource_name_pubsub" { + description = "The resource name for the destination Pub/Sub." + value = module.destination_pubsub[0].resource_name } -output "log_sinks_name" { - description = "The resource name of the log sink that was created." - value = toset([ - for value in module.log_export : value.log_sink_resource_name - ]) -} - -output "parent_resource_ids" { - description = "The ID of the GCP resource in which you create the log sink." - value = toset([ - for value in module.log_export : value.parent_resource_id - ]) -} - -output "resource_name" { - description = "The resource name for the destination" - value = local.resource_name +output "resource_name_logbucket" { + description = "The resource name for the destination Log Bucket." + value = module.destination_logbucket[0].resource_name } diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf index 10479275b..edc7244b3 100644 --- a/1-org/modules/centralized-logging/variables.tf +++ b/1-org/modules/centralized-logging/variables.tf @@ -45,191 +45,165 @@ variable "logging_destination_project_id" { type = string } -variable "logging_target_type" { - description = "Resource type of the resource that will store the logs. Must be: logbucket, bigquery, storage, or pubsub." - type = string - - validation { - condition = contains(["bigquery", "storage", "pubsub", "logbucket"], var.logging_target_type) - error_message = "The logging_target_type value must be: logbucket, bigquery, storage, or pubsub." - } -} - -variable "logging_target_name" { - description = "The name of the logging container (logbucket, bigquery-dataset, storage, or pubsub-topic) that will store the logs." - type = string - default = "" -} - -variable "logging_destination_uri" { - description = "The self_link URI of the destination resource. If provided all needed permissions will be assigned and this resource will be used as log destination for all resources." - type = string - default = "" -} - -variable "logging_sink_name" { - description = "The name of the log sink to be created." - type = string - default = "" -} - -variable "logging_sink_filter" { - description = "The filter to apply when exporting logs. 
Only log entries that match the filter are exported. Default is '' which exports all logs." - type = string - default = "" -} - -variable "include_children" { - description = "Only valid if 'organization' or 'folder' is chosen as var.resource_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included." - type = bool - default = false -} - -variable "exclusions" { - description = "(Optional) A list of sink exclusion filters." - type = list(object({ - name = string, - description = string, - filter = string, - disabled = bool - })) - default = [] -} -variable "bigquery_options" { - description = "(Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables. Applies to logging target type: bigquery." - type = object({ - use_partitioned_tables = bool - }) - default = null -} - -variable "labels" { - description = "(Optional) Labels attached to logging resources." - type = map(string) - default = {} -} -variable "kms_key_name" { - description = "(Optional) ID of a Cloud KMS CryptoKey that will be used to encrypt the logging destination. Applies to logging target types: bigquery, storage, and pubsub." - type = string +#----------------------------- # +# Logbucket specific variables # +#----------------------------- # +variable "logbucket_options" { + description = < Date: Mon, 29 Aug 2022 20:41:20 -0300 Subject: [PATCH 22/27] Duplicated validation Removed --- 1-org/modules/centralized-logging/variables.tf | 5 ----- 1 file changed, 5 deletions(-) diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf index edc7244b3..410679a20 100644 --- a/1-org/modules/centralized-logging/variables.tf +++ b/1-org/modules/centralized-logging/variables.tf @@ -166,11 +166,6 @@ EOT condition = var.storage_options == null ? true : !contains(keys(var.storage_options), "include_children") ? true : can(tobool(var.storage_options["include_children"])) error_message = "Include_children option must be a bool (true or false). Default false." } - - validation { - condition = var.storage_options == null ? true : !contains(keys(var.storage_options), "include_children") ? true : can(tobool(var.storage_options["include_children"])) - error_message = "Include_children option must be a bool (true or false). Default false." 
- } } From 68da348e47859da537276beab98b70eef82da4d6 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 29 Aug 2022 20:41:39 -0300 Subject: [PATCH 23/27] Fix handle retention_policy object --- 1-org/envs/shared/log_sinks.tf | 18 ++++++++++-------- 1-org/modules/centralized-logging/main.tf | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index 81fd4d246..570eabfcd 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -59,16 +59,18 @@ module "logs_export" { Send logs to Storage *****************************************/ storage_options = { - logging_sink_filter = local.all_logs_filter - logging_sink_name = "sk-c-logging-bkt" - include_children = true - storage_bucket_name = "bkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" - location = var.log_export_storage_location - retention_policy = var.log_export_storage_retention_policy - force_destroy = var.log_export_storage_force_destroy - versioning = var.log_export_storage_versioning + logging_sink_filter = local.all_logs_filter + logging_sink_name = "sk-c-logging-bkt" + include_children = true + storage_bucket_name = "bkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" + location = var.log_export_storage_location + retention_policy_is_locked = var.log_export_storage_retention_policy == null ? null : var.log_export_storage_retention_policy.is_locked + retention_policy_period_days = var.log_export_storage_retention_policy == null ? null : var.log_export_storage_retention_policy.retention_period_days + force_destroy = var.log_export_storage_force_destroy + versioning = var.log_export_storage_versioning } + /****************************************** Send logs to Pub\Sub *****************************************/ diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index fbbcf553e..09bc4f587 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -158,7 +158,7 @@ module "destination_storage" { force_destroy = lookup(var.storage_options, "force_destroy", "false") versioning = lookup(var.storage_options, "versioning", "false") - retention_policy = !contains(keys(var.storage_options), "retention_policy_is_locked") ? null : { + retention_policy = !contains(keys(var.storage_options), "retention_policy_is_locked") ? null : var.storage_options.retention_policy_is_locked == null ? 
null : { is_locked = tobool(lookup(var.storage_options, "retention_policy_is_locked", "false")) retention_period_days = tonumber(lookup(var.storage_options, "retention_policy_period_days", "30")) } From 83cfae8d9d50284787da167e66917607922e252c Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 29 Aug 2022 21:45:31 -0300 Subject: [PATCH 24/27] Fix added logbucket default location --- 1-org/envs/shared/log_sinks.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index 570eabfcd..4bda7606b 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -90,6 +90,7 @@ module "logs_export" { logging_sink_filter = local.all_logs_filter include_children = true name = "logbkt-org-logs-${random_string.suffix.result}" + location = var.default_region } } From 9ee1208a4dbac6c5e1791251e175330a8dad0046 Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Mon, 29 Aug 2022 21:46:04 -0300 Subject: [PATCH 25/27] Fix test output values to not break module --- 1-org/modules/centralized-logging/outputs.tf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf index 4bcfbbe38..41c2b37fd 100644 --- a/1-org/modules/centralized-logging/outputs.tf +++ b/1-org/modules/centralized-logging/outputs.tf @@ -16,20 +16,20 @@ output "resource_name_bigquery" { description = "The resource name for the destination BigQuery." - value = module.destination_bigquery[0].resource_name + value = try(module.destination_bigquery[0].resource_name, "") } output "resource_name_storage" { description = "The resource name for the destination Storage." - value = module.destination_storage[0].resource_name + value = try(module.destination_storage[0].resource_name, "") } output "resource_name_pubsub" { description = "The resource name for the destination Pub/Sub." - value = module.destination_pubsub[0].resource_name + value = try(module.destination_pubsub[0].resource_name, "") } output "resource_name_logbucket" { description = "The resource name for the destination Log Bucket." 
- value = module.destination_logbucket[0].resource_name + value = try(module.destination_logbucket[0].resource_name, "") } From 5d7400f5753650669b0528bf290aa416423a2a3e Mon Sep 17 00:00:00 2001 From: Felipe Crescencio Date: Fri, 2 Sep 2022 10:46:41 -0300 Subject: [PATCH 26/27] Fix PR reviews --- 1-org/envs/shared/log_sinks.tf | 8 +- 1-org/modules/centralized-logging/README.md | 55 +++++----- 1-org/modules/centralized-logging/main.tf | 102 ++++++++++-------- 1-org/modules/centralized-logging/outputs.tf | 8 +- .../modules/centralized-logging/variables.tf | 23 ---- 5 files changed, 93 insertions(+), 103 deletions(-) diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf index bf7b3d69d..300077444 100644 --- a/1-org/envs/shared/log_sinks.tf +++ b/1-org/envs/shared/log_sinks.tf @@ -49,8 +49,6 @@ module "logs_export" { logging_sink_name = "sk-c-logging-bq" logging_sink_filter = local.main_logs_filter dataset_name = "audit_logs" - partitioned_tables = "true" - include_children = true expiration_days = var.audit_logs_table_expiration_days delete_contents_on_destroy = var.audit_logs_table_delete_contents_on_destroy } @@ -61,7 +59,6 @@ module "logs_export" { storage_options = { logging_sink_filter = local.all_logs_filter logging_sink_name = "sk-c-logging-bkt" - include_children = true storage_bucket_name = "bkt-${module.org_audit_logs.project_id}-org-logs-${random_string.suffix.result}" location = var.log_export_storage_location retention_policy_is_locked = var.log_export_storage_retention_policy == null ? null : var.log_export_storage_retention_policy.is_locked @@ -70,14 +67,12 @@ module "logs_export" { versioning = var.log_export_storage_versioning } - /****************************************** Send logs to Pub\Sub *****************************************/ pubsub_options = { logging_sink_filter = local.main_logs_filter logging_sink_name = "sk-c-logging-pub" - include_children = true topic_name = "tp-org-logs-${random_string.suffix.result}" create_subscriber = true } @@ -88,9 +83,8 @@ module "logs_export" { logbucket_options = { logging_sink_name = "sk-c-logging-logbkt" logging_sink_filter = local.all_logs_filter - include_children = true name = "logbkt-org-logs-${random_string.suffix.result}" - location = var.default_region + location = local.default_region } } diff --git a/1-org/modules/centralized-logging/README.md b/1-org/modules/centralized-logging/README.md index 1ead6fa75..6b08b5e4f 100644 --- a/1-org/modules/centralized-logging/README.md +++ b/1-org/modules/centralized-logging/README.md @@ -1,6 +1,6 @@ # Centralized Logging Module -This module handles logging configuration enabling one or more resources such as organization, folders, or projects to send logs to a destination: [GCS bucket](https://cloud.google.com/logging/docs/export/using_exported_logs#gcs-overview), [Big Query](https://cloud.google.com/logging/docs/export/bigquery), [Pub/Sub](https://cloud.google.com/logging/docs/export/using_exported_logs#pubsub-overview), or [Log Buckets](https://cloud.google.com/logging/docs/routing/overview#buckets). 
+This module handles logging configuration enabling one or more resources such as organization, folders, or projects to send logs to multiple destinations: [GCS bucket](https://cloud.google.com/logging/docs/export/using_exported_logs#gcs-overview), [Big Query](https://cloud.google.com/logging/docs/export/bigquery), [Pub/Sub](https://cloud.google.com/logging/docs/export/using_exported_logs#pubsub-overview), and [Log Buckets](https://cloud.google.com/logging/docs/routing/overview#buckets). ## Usage @@ -9,7 +9,7 @@ Before using this module, get familiar with the [log-export](https://registry.te The following example exports audit logs from two folders to the same storage destination: ```hcl -module "logging_storage" { +module "logs_export" { source = "terraform-google-modules/terraform-example-foundation/google//1-org/modules/centralized-logging" resources = { @@ -17,7 +17,19 @@ module "logging_storage" { fldr2 = "" } resource_type = "folder" - logging_sink_filter = <- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- dataset\_name: The name of the bigquery dataset to be created and used for log entries.
- expiration\_days: (Optional) Table expiration time. If null logs will never be deleted.
- partitioned\_tables: (Optional) Options that affect sinks exporting data to BigQuery. use\_partitioned\_tables - (Required) Whether to use BigQuery's partition tables.
- delete\_contents\_on\_destroy: (Optional) If set to true, delete all contained objects in the logging destination.

Destination BigQuery options example:
bigquery_options = {
logging_sink_name = "sk-c-logging-bq"
dataset_name = "audit_logs"
partitioned_tables = "true"
include_children = "true"
expiration_days = 30
delete_contents_on_destroy = false
logging_sink_filter = < logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(string)` | `null` | no | -| logbucket\_options | Destination LogBucket options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- name: The name of the log bucket to be created and used for log entries matching the filter.
- location: The location of the log bucket. Default: global.
- retention\_days: (Optional) The number of days data should be retained for the log bucket. Default 30.

Destination LogBucket options example:
logbucket_options = {
logging_sink_name = "sk-c-logging-logbkt"
logging_sink_filter = ""
name = "logbkt-org-logs"
retention_days = "30"
include_children = "true"
location = "global"
}
| `map(any)` | `null` | no | +| bigquery\_options | Destination BigQuery options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- dataset\_name: The name of the bigquery dataset to be created and used for log entries.
- expiration\_days: (Optional) Table expiration time. If null logs will never be deleted.
- partitioned\_tables: (Optional) Whether to set use\_partitioned\_tables in the sink's BigQuery options. When enabled, logs are stored in a single table that is internally partitioned by day, which can improve query performance.
- delete\_contents\_on\_destroy: (Optional) If set to true, delete all contained objects in the logging destination.

Destination BigQuery options example:
bigquery_options = {
logging_sink_name = "sk-c-logging-bq"
dataset_name = "audit_logs"
partitioned_tables = "true"
expiration_days = 30
delete_contents_on_destroy = false
logging_sink_filter = <<EOF
logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(string)` | `null` | no | +| logbucket\_options | Destination LogBucket options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- name: The name of the log bucket to be created and used for log entries matching the filter.
- location: The location of the log bucket. Default: global.
- retention\_days: (Optional) The number of days data should be retained for the log bucket. Default 30.

Destination LogBucket options example:
logbucket_options = {
logging_sink_name = "sk-c-logging-logbkt"
logging_sink_filter = ""
name = "logbkt-org-logs"
retention_days = "30"
location = "global"
}
| `map(any)` | `null` | no | | logging\_destination\_project\_id | The ID of the project that will have the resources where the logs will be created. | `string` | n/a | yes | | logging\_project\_key | (Optional) The key of logging destination project if it is inside resources map. It is mandatory when resource\_type = project and logging\_target\_type = logbucket. | `string` | `""` | no | -| pubsub\_options | Destination Pubsub options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- topic\_name: The name of the pubsub topic to be created and used for log entries matching the filter.
- create\_subscriber: (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic.

Destination Storage options example:
pubsub_options = {
logging_sink_name = "sk-c-logging-pub"
include_children = true
topic_name = "tp-org-logs"
create_subscriber = true
logging_sink_filter = < logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(any)` | `null` | no | +| pubsub\_options | Destination Pubsub options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- topic\_name: The name of the pubsub topic to be created and used for log entries matching the filter.
- create\_subscriber: (Optional) Whether to create a subscription to the topic that was created and used for log entries matching the filter. If 'true', a pull subscription is created along with a service account that is granted roles/pubsub.subscriber and roles/pubsub.viewer to the topic.

Destination Pubsub options example:
pubsub_options = {
logging_sink_name = "sk-c-logging-pub"
topic_name = "tp-org-logs"
create_subscriber = true
logging_sink_filter = <<EOF
logName: /logs/cloudaudit.googleapis.com%2Factivity OR
logName: /logs/cloudaudit.googleapis.com%2Fsystem_event OR
logName: /logs/cloudaudit.googleapis.com%2Fdata_access OR
logName: /logs/compute.googleapis.com%2Fvpc_flows OR
logName: /logs/compute.googleapis.com%2Ffirewall OR
logName: /logs/cloudaudit.googleapis.com%2Faccess_transparency
EOF
}
| `map(any)` | `null` | no | | resource\_type | Resource type of the resource that will export logs to destination. Must be: project, organization, or folder. | `string` | n/a | yes | | resources | Export logs from the specified resources. | `map(string)` | n/a | yes | -| storage\_options | Destination Storage options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- include\_children: Only valid if 'organization' or 'folder' is chosen as var.resource\_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
- storage\_bucket\_name: The name of the storage bucket to be created and used for log entries matching the filter.
- location: (Optional) The location of the logging destination. Default: US.
- Retention Policy variables: (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.
- retention\_policy\_is\_locked: Set if policy is locked.
- retention\_policy\_period\_days: Set the period of days for log retention. Default: 30.
- versioning: (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted.
- force\_destroy: When deleting a bucket, this boolean option will delete all contained objects.

Destination Storage options example:
storage_options = {
logging_sink_name = "sk-c-logging-bkt"
logging_sink_filter = ""
include_children = "true"
storage_bucket_name = "bkt-org-logs"
location = "US"
force_destroy = false
versioning = false
}
| `map(any)` | `null` | no | +| storage\_options | Destination Storage options:
- logging\_sink\_name: The name of the log sink to be created.
- logging\_sink\_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
- storage\_bucket\_name: The name of the storage bucket to be created and used for log entries matching the filter.
- location: (Optional) The location of the logging destination. Default: US.
- Retention Policy variables: (Optional) Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.
- retention\_policy\_is\_locked: Set if policy is locked.
- retention\_policy\_period\_days: Set the period of days for log retention. Default: 30.
- versioning: (Optional) Toggles bucket versioning, ability to retain a non-current object version when the live object version gets replaced or deleted.
- force\_destroy: When deleting a bucket, this boolean option will delete all contained objects.

Destination Storage options example:
storage_options = {
logging_sink_name = "sk-c-logging-bkt"
logging_sink_filter = ""
storage_bucket_name = "bkt-org-logs"
location = "US"
force_destroy = false
versioning = false
}
| `map(any)` | `null` | no | ## Outputs | Name | Description | |------|-------------| -| resource\_name\_bigquery | The resource name for the destination BigQuery. | -| resource\_name\_logbucket | The resource name for the destination Log Bucket. | -| resource\_name\_pubsub | The resource name for the destination Pub/Sub. | -| resource\_name\_storage | The resource name for the destination Storage. | +| bigquery\_destination\_name | The resource name for the destination BigQuery. | +| logbucket\_destination\_name | The resource name for the destination Log Bucket. | +| pubsub\_destination\_name | The resource name for the destination Pub/Sub. | +| storage\_destination\_name | The resource name for the destination Storage. | diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf index 09bc4f587..ad436ac18 100644 --- a/1-org/modules/centralized-logging/main.tf +++ b/1-org/modules/centralized-logging/main.tf @@ -17,47 +17,60 @@ locals { value_first_resource = values(var.resources)[0] logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key } - - all_sinks = merge( - var.bigquery_options == null ? {} : { - for k, v in var.resources : "${v}_bgq" => var.bigquery_options - }, - var.logbucket_options == null ? {} : { - for k, v in var.resources : "${v}_lbk" => var.logbucket_options - }, - var.pubsub_options == null ? {} : { - for k, v in var.resources : "${v}_pub" => var.pubsub_options - }, - var.storage_options == null ? {} : { - for k, v in var.resources : "${v}_sto" => var.storage_options - }, - ) + include_children = (var.resource_type == "organization" || var.resource_type == "folder") + + # Create an intermediate list with all resources X all destinations + exports_list = flatten([ + # Iterate in all resources + for res_k, res_v in var.resources : [ + # Iterate in all log destinations + for dest_k, dest_v in local.destinations_options : { + # Create an object that is the base for a map creation below + "res" = res_v, + "options" = dest_v, + "type" = dest_k + } if dest_v != null + ] + ]) + + # Create a map based on the intermediate list above + # with keys "_" that is used by iam permissions on destinations + log_exports = { + for v in local.exports_list : "${v.res}_${v.type}" => v + } + destinations_options = { + bgq = var.bigquery_options + pub = var.pubsub_options + sto = var.storage_options + lbk = var.logbucket_options + } logging_sink_name_map = { - bgq = try("ds-logs-${var.logging_destination_project_id}", "ds-logs") - pub = try("tp-logs-${var.logging_destination_project_id}", "tp-logs") - sto = try("bkt-logs-${var.logging_destination_project_id}", "bkt-logs") - lbk = try("logbkt-logs-${var.logging_destination_project_id}", "logbkt-logs") + bgq = try("sk-to-ds-logs-${var.logging_destination_project_id}", "sk-to-ds-logs") + pub = try("sk-to-tp-logs-${var.logging_destination_project_id}", "sk-to-tp-logs") + sto = try("sk-to-bkt-logs-${var.logging_destination_project_id}", "sk-to-bkt-logs") + lbk = try("sk-to-logbkt-logs-${var.logging_destination_project_id}", "sk-to-logbkt-logs") } + + logging_tgt_name = { + bgq = replace("${local.logging_tgt_prefix.bgq}${random_string.suffix.result}", "-", "_") + pub = "${local.logging_tgt_prefix.pub}${random_string.suffix.result}" + sto = "${local.logging_tgt_prefix.sto}${random_string.suffix.result}" + lbk = "${local.logging_tgt_prefix.lbk}${random_string.suffix.result}" + } + destination_uri_map = { bgq = try(module.destination_bigquery[0].destination_uri, "") pub = 
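For orientation, a minimal caller wiring the two option maps documented above might look like the sketch below. The module instance name, project IDs, topic, and bucket names are placeholders, not values taken from this change, and only a subset of inputs is shown:

```hcl
module "logs_export" {
  source = "../../modules/centralized-logging"

  resources                      = { prj = "example-observability-prj" } # hypothetical source project
  resource_type                  = "project"
  logging_destination_project_id = "example-logging-dest-prj"            # hypothetical destination project

  # One sink per resource is created for each non-null destination below.
  pubsub_options = {
    logging_sink_name = "sk-c-logging-pub"
    topic_name        = "tp-org-logs"
    create_subscriber = true
  }

  storage_options = {
    logging_sink_name   = "sk-c-logging-bkt"
    storage_bucket_name = "bkt-org-logs-example"
    location            = "US"
  }
}
```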
diff --git a/1-org/modules/centralized-logging/main.tf b/1-org/modules/centralized-logging/main.tf
index 09bc4f587..ad436ac18 100644
--- a/1-org/modules/centralized-logging/main.tf
+++ b/1-org/modules/centralized-logging/main.tf
@@ -17,47 +17,60 @@
 locals {
   value_first_resource  = values(var.resources)[0]
   logbucket_sink_member = { for k, v in var.resources : k => v if k != var.logging_project_key }
-
-  all_sinks = merge(
-    var.bigquery_options == null ? {} : {
-      for k, v in var.resources : "${v}_bgq" => var.bigquery_options
-    },
-    var.logbucket_options == null ? {} : {
-      for k, v in var.resources : "${v}_lbk" => var.logbucket_options
-    },
-    var.pubsub_options == null ? {} : {
-      for k, v in var.resources : "${v}_pub" => var.pubsub_options
-    },
-    var.storage_options == null ? {} : {
-      for k, v in var.resources : "${v}_sto" => var.storage_options
-    },
-  )
+  include_children      = (var.resource_type == "organization" || var.resource_type == "folder")
+
+  # Create an intermediate list with all resources X all destinations
+  exports_list = flatten([
+    # Iterate in all resources
+    for res_k, res_v in var.resources : [
+      # Iterate in all log destinations
+      for dest_k, dest_v in local.destinations_options : {
+        # Create an object that is the base for a map creation below
+        "res"     = res_v,
+        "options" = dest_v,
+        "type"    = dest_k
+      } if dest_v != null
+    ]
+  ])
+
+  # Create a map based on the intermediate list above
+  # with keys "<resource>_<type>" that is used by iam permissions on destinations
+  log_exports = {
+    for v in local.exports_list : "${v.res}_${v.type}" => v
+  }
+
+  destinations_options = {
+    bgq = var.bigquery_options
+    pub = var.pubsub_options
+    sto = var.storage_options
+    lbk = var.logbucket_options
+  }
 
   logging_sink_name_map = {
-    bgq = try("ds-logs-${var.logging_destination_project_id}", "ds-logs")
-    pub = try("tp-logs-${var.logging_destination_project_id}", "tp-logs")
-    sto = try("bkt-logs-${var.logging_destination_project_id}", "bkt-logs")
-    lbk = try("logbkt-logs-${var.logging_destination_project_id}", "logbkt-logs")
+    bgq = try("sk-to-ds-logs-${var.logging_destination_project_id}", "sk-to-ds-logs")
+    pub = try("sk-to-tp-logs-${var.logging_destination_project_id}", "sk-to-tp-logs")
+    sto = try("sk-to-bkt-logs-${var.logging_destination_project_id}", "sk-to-bkt-logs")
+    lbk = try("sk-to-logbkt-logs-${var.logging_destination_project_id}", "sk-to-logbkt-logs")
   }
+
+  logging_tgt_name = {
+    bgq = replace("${local.logging_tgt_prefix.bgq}${random_string.suffix.result}", "-", "_")
+    pub = "${local.logging_tgt_prefix.pub}${random_string.suffix.result}"
+    sto = "${local.logging_tgt_prefix.sto}${random_string.suffix.result}"
+    lbk = "${local.logging_tgt_prefix.lbk}${random_string.suffix.result}"
+  }
+
   destination_uri_map = {
     bgq = try(module.destination_bigquery[0].destination_uri, "")
     pub = try(module.destination_pubsub[0].destination_uri, "")
     sto = try(module.destination_storage[0].destination_uri, "")
     lbk = try(module.destination_logbucket[0].destination_uri, "")
   }
-  logging_target_name_prefix = {
+  logging_tgt_prefix = {
     bgq = "ds_logs_"
     pub = "tp-logs-"
     sto = try("bkt-logs-${var.logging_destination_project_id}-", "bkt-logs-")
     lbk = "logbkt-logs-"
   }
-
-  part_tables = {
-    false = { use_partitioned_tables = false }
-    true  = { use_partitioned_tables = true }
-  }
-
-  bgq_options_part_tables = var.bigquery_options == null ? local.part_tables.false : !contains(keys(var.bigquery_options), "partitioned_tables") ? local.part_tables.false : var.bigquery_options.partitioned_tables == "true" ? local.part_tables.true : local.part_tables.false
 }
 
 resource "random_string" "suffix" {
@@ -70,31 +83,29 @@ module "log_export" {
   source  = "terraform-google-modules/log-export/google"
   version = "~> 7.3.0"
 
-  for_each = local.all_sinks
+  for_each = local.log_exports
 
-  destination_uri        = lookup(each.value, "destination_uri", lookup(local.destination_uri_map, substr(each.key, -3, -1), ""))
-  filter                 = lookup(each.value, "logging_sink_filter", "")
-  log_sink_name          = lookup(each.value, "logging_sink_name", "sk-to-${lookup(local.logging_sink_name_map, substr(each.key, -3, -1), "log_dest_")}")
-  parent_resource_id     = substr(each.key, 0, length(each.key) - 4)
+  destination_uri        = lookup(each.value.options, "destination_uri", local.destination_uri_map[each.value.type])
+  filter                 = lookup(each.value.options, "logging_sink_filter", "")
+  log_sink_name          = lookup(each.value.options, "logging_sink_name", local.logging_sink_name_map[each.value.type])
+  parent_resource_id     = each.value.res
   parent_resource_type   = var.resource_type
   unique_writer_identity = true
-  include_children       = tobool(lookup(each.value, "include_children", "false"))
-  bigquery_options       = substr(each.key, -3, -1) == "bgq" ? local.bgq_options_part_tables : null
+  include_children       = local.include_children
+  bigquery_options       = each.value.type == "bgq" ? { use_partitioned_tables = true } : null
 }
 
 #-------------------------#
 # Send logs to Log Bucket #
 #-------------------------#
 module "destination_logbucket" {
-  // source  = "terraform-google-modules/log-export/google//modules/logbucket"
-  // version = "~> 7.4.2"
-
-  source = "github.com/terraform-google-modules/terraform-google-log-export//modules/logbucket"
+  source  = "terraform-google-modules/log-export/google//modules/logbucket"
+  version = "~> 7.4.2"
 
   count = var.logbucket_options != null ? 1 : 0
 
   project_id               = var.logging_destination_project_id
-  name                     = lookup(var.logbucket_options, "name", "${lookup(local.logging_target_name_prefix, "lbk", "log_dest_")}${random_string.suffix.result}")
+  name                     = lookup(var.logbucket_options, "name", local.logging_tgt_name.lbk)
   log_sink_writer_identity = module.log_export["${local.value_first_resource}_lbk"].writer_identity
   location                 = lookup(var.logbucket_options, "location", "global")
   retention_days           = lookup(var.logbucket_options, "retention_days", 30)
@@ -109,7 +120,10 @@ resource "google_project_iam_member" "logbucket_sink_member" {
 
   project = var.logging_destination_project_id
   role    = "roles/logging.bucketWriter"
-  member  = module.log_export["${each.value}_lbk"].writer_identity
+
+  # Set permission only on sinks for this destination using
+  # module.log_export key "<resource>_<type>"
+  member = module.log_export["${each.value}_lbk"].writer_identity
 }
 
@@ -123,7 +137,7 @@ module "destination_bigquery" {
   count = var.bigquery_options != null ? 1 : 0
 
   project_id                 = var.logging_destination_project_id
-  dataset_name               = replace(lookup(var.bigquery_options, "dataset_name", "${lookup(local.logging_target_name_prefix, "bgq", "log_dest_")}${random_string.suffix.result}"), "-", "_")
+  dataset_name               = lookup(var.bigquery_options, "dataset_name", local.logging_tgt_name.bgq)
   log_sink_writer_identity   = module.log_export["${local.value_first_resource}_bgq"].writer_identity
   expiration_days            = lookup(var.bigquery_options, "expiration_days", null)
   delete_contents_on_destroy = lookup(var.bigquery_options, "delete_contents_on_destroy", false)
@@ -151,7 +165,7 @@ module "destination_storage" {
   count = var.storage_options != null ? 1 : 0
 
   project_id                  = var.logging_destination_project_id
-  storage_bucket_name         = lookup(var.storage_options, "storage_bucket_name", "${lookup(local.logging_target_name_prefix, "sto", "log_dest_")}${random_string.suffix.result}")
+  storage_bucket_name         = lookup(var.storage_options, "storage_bucket_name", local.logging_tgt_name.sto)
   log_sink_writer_identity    = module.log_export["${local.value_first_resource}_sto"].writer_identity
   uniform_bucket_level_access = true
   location                    = lookup(var.storage_options, "location", "US")
@@ -186,9 +200,9 @@ module "destination_pubsub" {
   count = var.pubsub_options != null ? 1 : 0
 
   project_id               = var.logging_destination_project_id
-  topic_name               = lookup(var.pubsub_options, "topic_name", "${lookup(local.logging_target_name_prefix, "pub", "log_dest_")}${random_string.suffix.result}")
+  topic_name               = lookup(var.pubsub_options, "topic_name", local.logging_tgt_name.pub)
   log_sink_writer_identity = module.log_export["${local.value_first_resource}_pub"].writer_identity
-  create_subscriber        = lookup(var.pubsub_options, "create_subscriber", false)
+  create_subscriber        = !contains(keys(var.pubsub_options), "create_subscriber") ? false : var.pubsub_options.create_subscriber
 }
 
 #---------------------------------------#
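The resources-by-destinations expansion introduced in these locals can be exercised on its own. The fragment below is a standalone sketch with made-up project IDs and only a Pub/Sub destination enabled; it shows the `<resource>_<type>` keys that `for_each = local.log_exports` ends up iterating over:

```hcl
locals {
  example_resources = {
    audit = "prj-audit-logs-1234" # hypothetical project IDs
    net   = "prj-net-hub-5678"
  }

  example_destinations = {
    pub = { topic_name = "tp-org-logs" }
    sto = null # destinations left as null are dropped by the "if" filter
  }

  # Cross every resource with every non-null destination.
  example_exports_list = flatten([
    for res_k, res_v in local.example_resources : [
      for dest_k, dest_v in local.example_destinations : {
        res     = res_v
        options = dest_v
        type    = dest_k
      } if dest_v != null
    ]
  ])

  # Re-key the list as "<resource>_<type>" for use with for_each.
  example_log_exports = { for v in local.example_exports_list : "${v.res}_${v.type}" => v }
}

output "example_log_export_keys" {
  # ["prj-audit-logs-1234_pub", "prj-net-hub-5678_pub"]
  value = keys(local.example_log_exports)
}
```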
diff --git a/1-org/modules/centralized-logging/outputs.tf b/1-org/modules/centralized-logging/outputs.tf
index 41c2b37fd..e44824198 100644
--- a/1-org/modules/centralized-logging/outputs.tf
+++ b/1-org/modules/centralized-logging/outputs.tf
@@ -14,22 +14,22 @@
  * limitations under the License.
  */
 
-output "resource_name_bigquery" {
+output "bigquery_destination_name" {
   description = "The resource name for the destination BigQuery."
   value       = try(module.destination_bigquery[0].resource_name, "")
 }
 
-output "resource_name_storage" {
+output "storage_destination_name" {
   description = "The resource name for the destination Storage."
   value       = try(module.destination_storage[0].resource_name, "")
 }
 
-output "resource_name_pubsub" {
+output "pubsub_destination_name" {
   description = "The resource name for the destination Pub/Sub."
   value       = try(module.destination_pubsub[0].resource_name, "")
 }
 
-output "resource_name_logbucket" {
+output "logbucket_destination_name" {
   description = "The resource name for the destination Log Bucket."
   value       = try(module.destination_logbucket[0].resource_name, "")
 }
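Because the module outputs are renamed, anything that consumed the old `resource_name_*` outputs has to switch to the new names. A sketch for the shared environment follows, assuming the module instance is named `centralized_logging` as in `1-org/envs/shared`; it is illustrative, not the literal contents of `1-org/envs/shared/outputs.tf`:

```hcl
output "logs_export_storage_bucket_name" {
  description = "The storage bucket for destination of log exports"
  value       = module.centralized_logging.storage_destination_name
}

output "logs_export_pubsub_topic" {
  description = "The Pub/Sub topic for destination of log exports"
  value       = module.centralized_logging.pubsub_destination_name
}

output "logs_export_logbucket_name" {
  description = "The log bucket for destination of log exports. See https://cloud.google.com/logging/docs/routing/overview#buckets"
  value       = module.centralized_logging.logbucket_destination_name
}
```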
diff --git a/1-org/modules/centralized-logging/variables.tf b/1-org/modules/centralized-logging/variables.tf
index 410679a20..05767c2b5 100644
--- a/1-org/modules/centralized-logging/variables.tf
+++ b/1-org/modules/centralized-logging/variables.tf
@@ -54,7 +54,6 @@ variable "logbucket_options" {
 Destination LogBucket options:
 - logging_sink_name: The name of the log sink to be created.
 - logging_sink_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
-- include_children: Only valid if 'organization' or 'folder' is chosen as var.resource_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
 - name: The name of the log bucket to be created and used for log entries matching the filter.
 - location: The location of the log bucket. Default: global.
 - retention_days: (Optional) The number of days data should be retained for the log bucket. Default 30.
@@ -66,7 +65,6 @@ logbucket_options = {
   logging_sink_filter = ""
   name                = "logbkt-org-logs"
   retention_days      = "30"
-  include_children    = "true"
   location            = "global"
 }
 ```
@@ -78,11 +76,6 @@ EOT
     condition     = var.logbucket_options == null ? true : !contains(keys(var.logbucket_options), "retention_days") ? true : can(tonumber(var.logbucket_options["retention_days"]))
     error_message = "Retention days must be a number. Default 30 days."
   }
-
-  validation {
-    condition     = var.logbucket_options == null ? true : !contains(keys(var.logbucket_options), "include_children") ? true : can(tobool(var.logbucket_options["include_children"]))
-    error_message = "Include_children option must be a bool (true or false). Default false."
-  }
 }
 
@@ -94,7 +87,6 @@ variable "bigquery_options" {
 Destination BigQuery options:
 - logging_sink_name: The name of the log sink to be created.
 - logging_sink_filter: The filter to apply when exporting logs. Only log entries that match the filter are exported. Default is '' which exports all logs.
-- include_children: Only valid if 'organization' or 'folder' is chosen as var.resource_type. Determines whether or not to include children organizations/folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization/folder are included.
 - dataset_name: The name of the bigquery dataset to be created and used for log entries.
 - expiration_days: (Optional) Table expiration time. If null logs will never be deleted.
 - partitioned_tables: (Optional) Options that affect sinks exporting data to BigQuery. use_partitioned_tables - (Required) Whether to use BigQuery's partition tables.
@@ -106,7 +98,6 @@ bigquery_options = {
   logging_sink_name          = "sk-c-logging-bq"
   dataset_name               = "audit_logs"
   partitioned_tables         = "true"
-  include_children           = "true"
   expiration_days            = 30
   delete_contents_on_destroy = false
   logging_sink_filter        = <<EOF
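With `include_children` gone from the option maps (it is now derived from `resource_type` inside the module), a log bucket destination is configured with only the remaining keys. A sketch follows; the names are placeholders, and `retention_days` may be passed as a quoted number because the retained validation only requires `can(tonumber(...))`:

```hcl
logbucket_options = {
  logging_sink_name   = "sk-c-logging-logbkt" # placeholder sink name
  logging_sink_filter = ""                    # empty filter exports all logs
  name                = "logbkt-org-logs-example"
  location            = "global"
  retention_days      = "30"
}
```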
Date: Fri, 2 Sep 2022 11:50:27 -0300
Subject: [PATCH 27/27] Fix outputs and remote state vars

---
 1-org/envs/shared/README.md    |  1 +
 1-org/envs/shared/log_sinks.tf |  4 ++--
 1-org/envs/shared/outputs.tf   | 11 ++++++++---
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/1-org/envs/shared/README.md b/1-org/envs/shared/README.md
index 983b28d5b..cef498495 100644
--- a/1-org/envs/shared/README.md
+++ b/1-org/envs/shared/README.md
@@ -64,6 +64,7 @@
 | dns\_hub\_project\_id | The DNS hub project ID |
 | domains\_to\_allow | The list of domains to allow users from in IAM. |
 | interconnect\_project\_id | The Dedicated Interconnect project ID |
+| logs\_export\_bigquery\_dataset\_name | The BigQuery dataset for destination of log exports. |
 | logs\_export\_logbucket\_name | The log bucket for destination of log exports. See https://cloud.google.com/logging/docs/routing/overview#buckets |
 | logs\_export\_pubsub\_topic | The Pub/Sub topic for destination of log exports |
 | logs\_export\_storage\_bucket\_name | The storage bucket for destination of log exports |
diff --git a/1-org/envs/shared/log_sinks.tf b/1-org/envs/shared/log_sinks.tf
index 300077444..b76bb1c0d 100644
--- a/1-org/envs/shared/log_sinks.tf
+++ b/1-org/envs/shared/log_sinks.tf
@@ -15,8 +15,8 @@
  */
 
 locals {
-  parent_resource_id   = var.parent_folder != "" ? var.parent_folder : var.org_id
-  parent_resource_type = var.parent_folder != "" ? "folder" : "organization"
+  parent_resource_id   = local.parent_folder != "" ? local.parent_folder : local.org_id
+  parent_resource_type = local.parent_folder != "" ? "folder" : "organization"
+  parent_resources     = { resource = local.parent_resource_id }
   main_logs_filter     = <<EOF