diff --git a/gcp/demo/README.md b/gcp/demo/README.md
new file mode 100644
index 0000000..99bad7d
--- /dev/null
+++ b/gcp/demo/README.md
@@ -0,0 +1,73 @@
+# PTFE (External Services) resource provisioner - GCP
+
+Provisions the necessary GCP resources for the External Services installation type.
+
+## Resources
+
+* google_compute_instance.ptfe1
+* google_compute_instance.ptfe2
+* google_sql_database_instance.pes
+* google_storage_bucket.pes
+
+## Architecture
+
+### Application Layer
+
+The `network` module uses a data source to discover healthy availability zones and returns them as a list:
+
+```hcl
+data "google_compute_zones" "main" {
+  status = "UP"
+}
+```
+
+The resulting list is passed into the `pes` module as the `zone` variable. The first two availability zones in the list are used as locations for two VM instances (**ptfe1** and **ptfe2**), ensuring they run in different availability zones:
+
+```hcl
+resource "google_compute_instance" "ptfe1" {
+  zone = "${var.zone[0]}"
+}
+
+resource "google_compute_instance" "ptfe2" {
+  zone = "${var.zone[1]}"
+}
+```
+
+Conditional logic assigns the active alias IP to the active VM instance, which is selected by a Terraform variable:
+
+```hcl
+variable "active_ptfe_instance" {
+  description = "The active PTFE instance, i.e. ptfe1 or ptfe2"
+  default     = "ptfe1"
+}
+
+variable "active_alias_ip" {
+  description = "Alias IP attached to the active PTFE VM instance"
+}
+
+variable "standby_alias_ip" {
+  description = "Alias IP attached to the standby PTFE VM instance"
+}
+
+resource "google_compute_instance" "ptfe1" {
+  network_interface {
+    alias_ip_range {
+      ip_cidr_range = "${var.active_ptfe_instance == "ptfe1" ? var.active_alias_ip : var.standby_alias_ip}/32"
+    }
+  }
+}
+
+resource "google_compute_instance" "ptfe2" {
+  network_interface {
+    alias_ip_range {
+      ip_cidr_range = "${var.active_ptfe_instance == "ptfe2" ? var.active_alias_ip : var.standby_alias_ip}/32"
+    }
+  }
+}
+```
+
+Using this conditional logic, the `active_alias_ip` can be switched between `ptfe1` and `ptfe2` by changing the value of `active_ptfe_instance` and performing a Terraform run (see the failover sketch at the end of this README). This could be done manually or automatically following an availability zone failure. Note that the run would likely produce an error, as Terraform will fail to update the configuration of the compute VM instance in the failed availability zone; the change to the available compute VM instance would still succeed.
+
+### Storage Layer
+
+Google Cloud SQL (PostgreSQL) and Google Cloud Storage are configured with `REGIONAL` availability and resiliency, so both services remain available in the event of an availability zone failure. The Google Cloud Platform documentation provides more information on the exact behaviour of each service during an availability zone failure.
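+
+For illustration, a minimal sketch of this configuration using the resource names from the list above. The `tier`, `database_version`, and `name` values are assumptions rather than the module's actual settings:
+
+```hcl
+resource "google_sql_database_instance" "pes" {
+  name             = "${var.namespace}-pes"
+  database_version = "POSTGRES_9_6"
+  region           = "${var.region}"
+
+  settings {
+    tier = "db-custom-2-8192"
+
+    # REGIONAL provisions a standby in a second zone with automatic failover
+    availability_type = "REGIONAL"
+  }
+}
+
+resource "google_storage_bucket" "pes" {
+  name     = "${var.namespace}-pes"
+  location = "${var.region}"
+
+  # REGIONAL storage is replicated across zones within the region
+  storage_class = "REGIONAL"
+}
+```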
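+
+Failover sketch: as described under Application Layer above, switching the active instance is a single variable change followed by a Terraform run. A minimal example, assuming the variable values in `terraform.tfvars`:
+
+```hcl
+# terraform.tfvars: point the active alias IP at ptfe2, then run
+# `terraform apply`. Expect an error against the instance in the failed
+# availability zone; the update to the healthy instance still applies.
+active_ptfe_instance = "ptfe2"
+```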
diff --git a/gcp/demo/main.tf b/gcp/demo/main.tf
new file mode 100644
index 0000000..03b98e9
--- /dev/null
+++ b/gcp/demo/main.tf
@@ -0,0 +1,39 @@
+#------------------------------------------------------------------------------
+# demo/poc ptfe resources
+#------------------------------------------------------------------------------
+
+locals {
+  namespace = "${var.namespace}-demo"
+}
+
+resource "google_compute_instance" "ptfe" {
+  name         = "${local.namespace}-instance-ptfe"
+  machine_type = "${var.gcp_machine_type}"
+  zone         = "${var.zone[0]}"
+
+  boot_disk {
+    initialize_params {
+      size  = 100
+      image = "${var.gcp_machine_image}"
+    }
+  }
+
+  network_interface {
+    # An empty access_config block requests an ephemeral external IP
+    access_config {}
+    subnetwork = "${var.subnetwork}"
+  }
+
+  metadata_startup_script = "${var.startup_script}"
+
+  service_account {
+    scopes = ["https://www.googleapis.com/auth/sqlservice.admin"]
+  }
+
+  allow_stopping_for_update = true
+
+  # Run the PTFE installer fetched by the startup script; self avoids a
+  # cyclic reference to the resource's own name
+  provisioner "local-exec" {
+    command = "gcloud compute ssh --zone ${var.zone[0]} ${self.name} --command 'sudo /tmp/install_ptfe.sh no-proxy bypass-storagedriver-warnings'"
+  }
+}
diff --git a/gcp/demo/outputs.tf b/gcp/demo/outputs.tf
new file mode 100644
index 0000000..d280a59
--- /dev/null
+++ b/gcp/demo/outputs.tf
@@ -0,0 +1,3 @@
+output "vm_instance_name" {
+  value = "${google_compute_instance.ptfe.name}"
+}
diff --git a/gcp/demo/variables.tf b/gcp/demo/variables.tf
new file mode 100644
index 0000000..ed56121
--- /dev/null
+++ b/gcp/demo/variables.tf
@@ -0,0 +1,14 @@
+variable "namespace" {}
+variable "region" {}
+
+variable "zone" {
+  type = "list"
+}
+
+variable "gcp_machine_image" {}
+variable "gcp_machine_type" {}
+variable "subnetwork" {}
+variable "active_ptfe_instance" {}
+variable "active_alias_ip" {}
+variable "standby_alias_ip" {}
+variable "startup_script" {}
diff --git a/gcp/main.tf b/gcp/main.tf
index dbe4f53..d902e16 100644
--- a/gcp/main.tf
+++ b/gcp/main.tf
@@ -3,9 +3,9 @@ terraform {
 }
 
 provider "google" {
-  credentials = "${file("service_account.json")}"
-  project     = "${var.project}"
-  region      = "${var.region}"
+  #credentials = "${file("service_account.json")}"
+  project     = "${var.project}"
+  region      = "${var.region}"
 }
 
 resource "google_compute_project_metadata_item" "ssh_key" {
@@ -30,22 +30,25 @@ module "network" {
 # demo/poc ptfe
 #------------------------------------------------------------------------------
 
-#module "demo" {
-#  source                 = "demo/"
-#  namespace              = "${var.namespace}"
-#  aws_instance_ami       = "${var.aws_instance_ami}"
-#  aws_instance_type      = "${var.aws_instance_type}"
-#  subnet_id              = "${module.network.subnet_ids[0]}"
-#  vpc_security_group_ids = "${module.network.security_group_id}"
-#  user_data              = ""
-#  ssh_key_name           = "${var.ssh_key_name}"
-#  hashidemos_zone_id     = "${data.aws_route53_zone.hashidemos.zone_id}"
-#}
+module "demo" {
+  source               = "demo/"
+  namespace            = "${var.namespace}"
+  region               = "${var.region}"
+  zone                 = "${module.network.available_zones}"
+  subnetwork           = "${module.network.private_subnet_self_link}"
+  active_ptfe_instance = "${var.active_ptfe_instance}"
+  active_alias_ip      = "${var.active_alias_ip}"
+  standby_alias_ip     = "${var.standby_alias_ip}"
+  gcp_machine_type     = "${var.gcp_machine_type}"
+  gcp_machine_image    = "${var.gcp_machine_image}"
+  startup_script       = "${var.startup_script}"
+}
 
 #------------------------------------------------------------------------------
 # production mounted disk ptfe
 #------------------------------------------------------------------------------
+
 #module "pmd" {
 #  source                 = "pmd/"
 #  namespace              = "${var.namespace}"
@@ -58,33 +61,38 @@ module "network" {
 #  hashidemos_zone_id     = "${data.aws_route53_zone.hashidemos.zone_id}"
 #}
+
 #------------------------------------------------------------------------------
 # production external-services ptfe
 #------------------------------------------------------------------------------
-module "pes" {
-  source               = "pes/"
-  namespace            = "${var.namespace}"
-  region               = "${var.region}"
-  zone                 = "${module.network.available_zones}"
-  subnetwork           = "${module.network.private_subnet_self_link}"
-  active_ptfe_instance = "${var.active_ptfe_instance}"
-  active_alias_ip      = "${var.active_alias_ip}"
-  standby_alias_ip     = "${var.standby_alias_ip}"
-  gcp_machine_type     = "${var.gcp_machine_type}"
-  gcp_machine_image    = "${var.gcp_machine_image}"
-}
+
+#module "pes" {
+#  source               = "pes/"
+#  namespace            = "${var.namespace}"
+#  region               = "${var.region}"
+#  zone                 = "${module.network.available_zones}"
+#  subnetwork           = "${module.network.private_subnet_self_link}"
+#  active_ptfe_instance = "${var.active_ptfe_instance}"
+#  active_alias_ip      = "${var.active_alias_ip}"
+#  standby_alias_ip     = "${var.standby_alias_ip}"
+#  gcp_machine_type     = "${var.gcp_machine_type}"
+#  gcp_machine_image    = "${var.gcp_machine_image}"
+#}
+
 #------------------------------------------------------------------------------
 # bastion host
 #------------------------------------------------------------------------------
-module "bastion" {
-  source            = "bastion/"
-  namespace         = "${var.namespace}"
-  region            = "${var.region}"
-  zone              = "${module.network.available_zones}"
-  subnetwork        = "${module.network.private_subnet_self_link}"
-  gcp_machine_type  = "${var.gcp_machine_type}"
-  gcp_machine_image = "${var.gcp_machine_image}"
-}
+
+#module "bastion" {
+#  source            = "bastion/"
+#  namespace         = "${var.namespace}"
+#  region            = "${var.region}"
+#  zone              = "${module.network.available_zones}"
+#  subnetwork        = "${module.network.private_subnet_self_link}"
+#  gcp_machine_type  = "${var.gcp_machine_type}"
+#  gcp_machine_image = "${var.gcp_machine_image}"
+#}
+
diff --git a/gcp/outputs.tf b/gcp/outputs.tf
index 82b3ce4..ecdb363 100644
--- a/gcp/outputs.tf
+++ b/gcp/outputs.tf
@@ -1,3 +1,4 @@
+/*
 output "private_subnet_id" {
   value = "${module.network.private_subnet_id}"
 }
@@ -5,3 +6,4 @@ output "private_subnet_id" {
 output "db_connection_name" {
   value = "${module.pes.db_connection_name}"
 }
+*/
diff --git a/gcp/terraform.tfvars b/gcp/terraform.tfvars
new file mode 100644
index 0000000..9daa9b5
--- /dev/null
+++ b/gcp/terraform.tfvars
@@ -0,0 +1,10 @@
+region               = "europe-west1"
+project              = "gb-playground"
+namespace            = "guy-ptfe"
+gcp_machine_image    = "debian-cloud/debian-9-stretch-v20180814"
+gcp_machine_type     = "n1-standard-4"
+ssh_user             = "guy"
+ssh_public_key_file  = "/Users/guy/.ssh/id_rsa.pub"
+active_ptfe_instance = "ptfe1"
+active_alias_ip      = "10.1.0.5"
+standby_alias_ip     = "10.1.0.6"
diff --git a/gcp/variables.tf b/gcp/variables.tf
index 75ca361..97b06a5 100644
--- a/gcp/variables.tf
+++ b/gcp/variables.tf
@@ -38,3 +38,13 @@ variable "ssh_user" {
 variable "ssh_public_key_file" {
   description = "Path to SSH public key file for VM instance access eg /home/user/.ssh/id_rsa.pub"
 }
+
+variable "startup_script" {
+  description = "Startup script run on the PTFE VM instance at boot"
+  type        = "string"
+  default     = <<EOS
+# NOTE: installer URL is an assumed value for the PTFE install script
+curl https://install.terraform.io/ptfe/stable > /tmp/install_ptfe.sh
+sudo chmod 500 /tmp/install_ptfe.sh
+EOS
+}