Commit
[modules/aws-ec2-docker-ucp-workers/main.tf] cnunez | I ran into a self-referencing splat error that prevented me from using the same approach that I did with the (single-node) managers. Using a null resource with triggers purports to solve this. See: hashicorp/terraform#2677
Carlos Nunez committed Jul 18, 2017
1 parent e2da7b6 commit 0a0d3b4
Showing 1 changed file with 52 additions and 6 deletions.
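For context on the pattern this commit adopts: the provisioning step is moved off the instance and onto a null_resource whose triggers key off the joined instance IDs, so the instance never has to reference its own splat attribute and the provisioner re-runs whenever the set of instances changes. Below is a minimal sketch of that pattern in the same Terraform 0.x style as the module; the resource names and var.example_ami_id are illustrative placeholders, not taken from this repository.

# Hypothetical worker instances; stands in for the real UCP worker resources.
resource "aws_instance" "example_worker" {
  count         = 2
  ami           = "${var.example_ami_id}"   # hypothetical variable
  instance_type = "t2.medium"
}

# Provisioning lives on a separate null_resource instead of the instance itself,
# so the instance never references its own *.public_ip splat.
resource "null_resource" "provision_example_worker" {
  count = 2

  # Re-run the provisioner whenever the set of instance IDs changes.
  triggers {
    instance_ids = "${join(",", aws_instance.example_worker.*.id)}"
  }

  provisioner "local-exec" {
    command = "echo provisioning ${element(aws_instance.example_worker.*.public_ip, count.index)}"
  }
}

In the diff that follows, the same idea is applied once per availability zone: provision_ucp_worker_a, provision_ucp_worker_b, and provision_ucp_worker_c each trigger on their own worker group's instance IDs and run the Ansible playbook via local-exec.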
58 changes: 52 additions & 6 deletions docker-ddc-terraform/modules/aws-ec2-docker-ucp-workers/main.tf
@@ -166,6 +166,25 @@ EOF
    delete_on_termination = true
  }
}
resource "null_resource" "provision_ucp_worker_a" {
  triggers {
    ucp_worker_changes = "${join(",", aws_instance.ucp_worker_a.*.id)}"
  }
  count = "${var.number_of_aws_availability_zones_to_use > 1 ? var.number_of_workers_per_az : 0}"
  provisioner "local-exec" {
    command = <<EOF
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook \
-u ubuntu \
--private-key ${var.aws_ec2_private_key_location} \
-i ${element(aws_instance.ucp_worker_a.*.public_ip,count.index)}, \
-e docker_ee_repo_url=${var.docker_ee_repo_url} \
-e ucp_role=worker \
-e is_primary_node=false \
-e docker_ucp_swarm_leader=${var.docker_swarm_leader_ip} \
docker-ucp-playbook.yml
EOF
  }
}

resource "aws_route53_record" "ucp_worker_b" {
  depends_on = [
@@ -179,24 +198,31 @@ resource "aws_route53_record" "ucp_worker_b" {
  records = [ "${element(aws_instance.ucp_worker_b.*.public_dns, count.index)}" ]
}

resource "aws_instance" "ucp_worker_c" {
depends_on = [
"aws_security_group.ucp_worker",
"aws_subnet.worker_subnet_c"
]
resource "null_resource" "provision_ucp_worker_b" {
triggers {
ucp_worker_changes = "${join(",", aws_instance.ucp_worker_b.*.id)}"
}
count = "${var.number_of_aws_availability_zones_to_use > 1 ? var.number_of_workers_per_az : 0}"
provisioner "local-exec" {
command = <<EOF
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook \
-u ubuntu \
--private-key ${var.aws_ec2_private_key_location} \
-i ${element(aws_instance.ucp_worker_c.*.public_ip,count.index)}, \
-i ${element(aws_instance.ucp_worker_b.*.public_ip,count.index)}, \
-e docker_ee_repo_url=${var.docker_ee_repo_url} \
-e ucp_role=worker \
-e is_primary_node=false \
-e docker_ucp_swarm_leader=${var.docker_swarm_leader_ip} \
docker-ucp-playbook.yml
EOF
}
}

resource "aws_instance" "ucp_worker_c" {
depends_on = [
"aws_security_group.ucp_worker",
"aws_subnet.worker_subnet_c"
]
associate_public_ip_address = true
subnet_id = "${aws_subnet.worker_subnet_c.id}"
count = "${var.number_of_aws_availability_zones_to_use > 2 ? var.number_of_workers_per_az : 0}"
@@ -218,6 +244,26 @@ EOF
  }
}

resource "null_resource" "provision_ucp_worker_c" {
  triggers {
    ucp_worker_changes = "${join(",", aws_instance.ucp_worker_c.*.id)}"
  }
  count = "${var.number_of_aws_availability_zones_to_use > 2 ? var.number_of_workers_per_az : 0}"
  provisioner "local-exec" {
    command = <<EOF
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook \
-u ubuntu \
--private-key ${var.aws_ec2_private_key_location} \
-i ${element(aws_instance.ucp_worker_c.*.public_ip,count.index)}, \
-e docker_ee_repo_url=${var.docker_ee_repo_url} \
-e ucp_role=worker \
-e is_primary_node=false \
-e docker_ucp_swarm_leader=${var.docker_swarm_leader_ip} \
docker-ucp-playbook.yml
EOF
  }
}

resource "aws_route53_record" "ucp_worker_c" {
  depends_on = [
    "aws_instance.ucp_worker_c"
