Skip to content

Commit

Permalink
spins up cluster with working bgp sessions and global ipv4 with all c…
Browse files Browse the repository at this point in the history
…luster controllers as a backend
  • Loading branch information
Joseph D. Marhee committed Mar 15, 2019
1 parent 91cd96d commit 027ea8b
Show file tree
Hide file tree
Showing 8 changed files with 60 additions and 21 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@
*.tfvars
packet_key
packet_key.pub
3-cluster-inventory.tf
8 changes: 7 additions & 1 deletion 1-provider.tf
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
# Packet (bare-metal cloud) Terraform provider, pinned to 1.3.2 so runs are
# reproducible across machines. The API token is supplied via a Terraform
# variable rather than hard-coded.
provider "packet" {
version = "1.3.2"
auth_token = "${var.auth_token}"
}

# Reserve a single global (anycast) IPv4 address for the project. The address
# is handed to each cluster_pool module instance (see 2-clusters.tf and
# template.tpl) so every cluster controller can announce the same IP over BGP.
resource "packet_reserved_ip_block" "anycast_ip" {
project_id = "${var.project_id}"
# global_ipv4 blocks are project-wide rather than tied to one datacenter.
type = "global_ipv4"
quantity = 1
# NOTE(review): facility is deliberately blank because the block is global —
# confirm provider 1.3.2 accepts "" here rather than requiring the argument
# to be omitted entirely.
facility = ""
}
20 changes: 10 additions & 10 deletions 2-clusters.tf
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
#Your Initial Cluster is defined here, subsequent clusters inventoried in 3-cluster-inventory.tf, created by Makefile
# Initial (tracked) cluster, built from the shared cluster_pool module.
# Later clusters are appended to the git-ignored 3-cluster-inventory.tf by
# `make define-cluster`.
#
# NOTE(review): this span shows each argument twice — the page header says
# "10 additions & 10 deletions", so the first run of assignments is the
# pre-change text and the second run (plus anycast_ip) is the post-change
# text with diff markers lost. HCL rejects duplicate attributes; only one
# set should exist in the real file — verify against the repository.
module "cluster_facility" {
source = "modules/cluster_pool"

cluster_name = "primary"
count = "${var.count}"
plan_primary = "${var.plan_primary}"
plan_node = "${var.plan_node}"
facility = "${var.facility}"
auth_token = "${var.auth_token}"
project_id = "${var.project_id}"
ssh_private_key_path = "${var.ssh_private_key_path}"
cluster_name = "primary"
count = "${var.count}"
plan_primary = "${var.plan_primary}"
plan_node = "${var.plan_node}"
facility = "${var.facility}"
auth_token = "${var.auth_token}"
project_id = "${var.project_id}"
ssh_private_key_path = "${var.ssh_private_key_path}"
# Shared global IP reserved in 1-provider.tf, announced by this cluster too.
anycast_ip = "${packet_reserved_ip_block.anycast_ip.address}"
}

#Subsequent Clusters will be populated below this line. See README for spin-up procedure.

6 changes: 4 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,12 @@ endif
# Render template.tpl for a new cluster (substituting REGION/NAME) and append
# the result to the cluster inventory. Requires facility= and cluster_id= on
# the command line.
# NOTE(review): this is a command, not a file — it should be declared .PHONY.
define-cluster:
if [ -z $(facility) ]; then \
echo "Command format:\n\tmake facility=\"ewr1\" cluster_id=\"some_name\" define-cluster\n\n"; exit 1; fi
# NOTE(review): the next line appends to 2-clusters.tf while the lines below
# append to 3-cluster-inventory.tf — these look like the pre- and post-change
# variants of the same step with diff markers lost in scraping; keeping both
# would define every new cluster twice. Verify against the repository.
cat template.tpl | sed -e 's|REGION|${facility}|g' -e 's|NAME|$(cluster_id)|g' | tee -a 2-clusters.tf

echo "\n#Cluster $(cluster_id) in ${facility} generated `date +%F%H%M%S`\n" | tee -a 3-cluster-inventory.tf > /dev/null ; \
cat template.tpl | sed -e 's|REGION|${facility}|g' -e 's|NAME|$(cluster_id)|g' | tee -a 3-cluster-inventory.tf > /dev/null ; \
echo "\nCluster Name: cluster_$(cluster_id)_${facility}\nRun \`make cluster_name='cluster_$(cluster_id)_${facility}' apply-cluster\` to apply changes.\n"
# Validate, init, and apply only the named cluster module.
# Usage: make cluster_name="cluster_<id>_<facility>" apply-cluster
# Declared .PHONY because it is a command, not a file target.
.PHONY: apply-cluster
apply-cluster:
	if [ -z $(cluster_name) ]; then \
	echo "\n\n Command format:\n\tmake cluster_name=\"cluster_id_facility\" apply-cluster\n\n"; exit 1; fi
	terraform validate && \
	terraform init && \
	terraform apply -target="module.$(cluster_name)"
8 changes: 5 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ In addition to Terraform, your client machine (where Terraform will be run from)

You will need an SSH key associated with this project, or your account. Add the identity path to `ssh_private_key`--this will only be used _locally_ to assist Terraform in completing cluster bootstrapping (needed to retrieve the cluster node-token from the controller node).

BGP will need to be enabled for your project.

Clusters
-

Expand All @@ -39,14 +41,14 @@ and then apply your new cluster module (if you do not wish to apply any other ou
```
make cluster_name="cluster_control_ewr1" apply-cluster
```
where `cluster_name` is the module name for that cluster in `2-clusters.tf`, if you wish to review this manually before applying. This will follow the format `cluster_$cluster-id_$facility`.
where `cluster_name` is the module name for that cluster in `3-cluster-inventory.tf`, if you wish to review this manually before applying. This will follow the format `cluster_$cluster-id_$facility`.

<h3>Manually defining a Cluster</h3>

To create a cluster manually, in `2-clusters.tf`, instantiate a new `cluster_pool` module:
To create a cluster manually, in `3-cluster-inventory.tf` (this is ignored by git--your initial cluster setup is in `2-clusters.tf`, and is tracked), instantiate a new `cluster_pool` module:

```
module "cluster_nrt1" {
module "cluster_name_nrt1" {
source = "modules/cluster_pool"
cluster_name = "your_cluster_name"
Expand Down
17 changes: 16 additions & 1 deletion modules/cluster_pool/controller.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,18 @@ function apply_workloads {
wget https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml
}

# Install the BIRD BGP daemon and configure it to announce the shared anycast
# IP from this controller. The config script is copied onto the host by a
# separate Terraform local-exec provisioner (see modules/cluster_pool/main.tf),
# which races with cloud-init, so poll until it appears.
# ${anycast_ip} is substituted by Terraform's template_file rendering, not by
# the shell at runtime.
function start_anycast {
  apt update; apt install -y bird
  while true; do
    if [ ! -f /root/create_bird_conf.sh ]; then
      echo "Bird not ready...waiting..."
      # Fix: the original loop had no delay, busy-spinning the CPU and
      # flooding the console log until the provisioner finished the copy.
      sleep 5
    else
      bash /root/create_bird_conf.sh "${anycast_ip}"
      break
    fi
  done
}

init_cluster && \
start_cluster && \
check_cluster && \
Expand All @@ -71,4 +83,7 @@ echo "MetalLB configured...\nTo allocate a Service with an IP from ${packet_netw
echo "Finishing..." ; \
echo "Renaming context to $(hostname)..." && \
kubectl config rename-context default $(hostname) && \
kubectl config get-contexts
kubectl config get-contexts && \
echo "Cluster controller spinup complete...setting up Bird..." && \
echo "Starting script for ${anycast_ip}..." ; \
start_anycast
18 changes: 15 additions & 3 deletions modules/cluster_pool/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ variable "auth_token" {}
variable "project_id" {}
variable "cluster_name" {}
variable "ssh_private_key_path" {}
variable "anycast_ip" {}

resource "packet_reserved_ip_block" "packet-k3s" {
project_id = "${var.project_id}"
Expand All @@ -20,19 +21,30 @@ data "template_file" "controller" {
packet_network_cidr = "${packet_reserved_ip_block.packet-k3s.cidr_notation}"
packet_auth_token = "${var.auth_token}"
packet_project_id = "${var.project_id}"
anycast_ip = "${var.anycast_ip}"
}
}

# Controller node for the cluster. cloud-init user_data comes from the
# rendered controller.tpl, which bootstraps k3s and then runs the BIRD setup
# script that the provisioner below copies onto the host.
#
# NOTE(review): the two hostname lines are the pre- and post-change variants
# of one assignment (diff markers lost in scraping); HCL rejects duplicate
# attributes, so only the facility-qualified one should exist in the real
# file — verify against the repository.
resource "packet_device" "k3s_primary" {
hostname = "packet-k3s-${var.cluster_name}-controller"
hostname = "packet-k3s-${var.cluster_name}-${var.facility}-controller"
operating_system = "ubuntu_16_04"
plan = "${var.plan_primary}"
facility = "${var.facility}"
user_data = "${data.template_file.controller.rendered}"

# Copy the BIRD config generator onto the new host; controller.tpl's
# start_anycast polls for this exact path before configuring BGP.
# NOTE(review): host-key checking is disabled for the fresh host — acceptable
# for bootstrap, but worth confirming this is intentional.
provisioner "local-exec" {
command = "scp -i ${var.ssh_private_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null scripts/create_bird_conf.sh root@${self.access_public_ipv4}:/root/create_bird_conf.sh"
}

billing_cycle = "hourly"
project_id = "${var.project_id}"
}

# Enable an IPv4 BGP session on the controller device so the BIRD daemon it
# runs (see controller.tpl) can announce the global anycast IP.
# NOTE(review): the label "test" reads like a placeholder, but renaming a
# resource moves Terraform state — left unchanged.
resource "packet_bgp_session" "test" {
device_id = "${packet_device.k3s_primary.id}"
address_family = "ipv4"
}

resource "packet_ip_attachment" "kubernetes_lb_block" {
device_id = "${packet_device.k3s_primary.id}"
cidr_notation = "${packet_reserved_ip_block.packet-k3s.cidr_notation}"
Expand All @@ -47,15 +59,15 @@ data "template_file" "node" {
}

resource "packet_device" "arm_node" {
hostname = "${format("packet-k3s-${var.cluster_name}-%02d", count.index)}"
hostname = "${format("packet-k3s-${var.cluster_name}-${var.facility}-%02d", count.index)}"
operating_system = "ubuntu_16_04"
count = "${var.count}"
plan = "${var.plan_node}"
facility = "${var.facility}"
user_data = "${data.template_file.node.rendered}"

provisioner "local-exec" {
command = "scp -3 -i ${var.ssh_private_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q root@${packet_device.k3s_primary.network.0.address}:/var/lib/rancher/k3s/server/node-token root@${self.access_public_ipv4}:node-token"
command = "scp -3 -i ${var.ssh_private_key_path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -q root@${packet_device.k3s_primary.network.0.address}:/var/lib/rancher/k3s/server/node-token root@${self.access_public_ipv4}:node-token"
}

billing_cycle = "hourly"
Expand Down
3 changes: 2 additions & 1 deletion template.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ module "cluster_NAME_REGION" {
plan_node = "${var.plan_node}"
facility = "REGION"
auth_token = "${var.auth_token}"
project_id = "${packet_project.k3s_packet.id}"
project_id = "${var.project_id}"
ssh_private_key_path = "${var.ssh_private_key_path}"
anycast_ip = "${packet_reserved_ip_block.anycast_ip.address}"
}

0 comments on commit 027ea8b

Please sign in to comment.