This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

azure: update assets directory
Updates the Azure assets to the new version and applies the corresponding
file changes: the module no longer manages the DNS zone or the etcd and
apiserver DNS records, controllers and workers wait for the
"${cluster_name}-private" record via a new /opt/wait-for-dns helper, worker
pools gain cluster_name, dns_zone, labels, and taints inputs, and the azurerm
provider is bumped to 2.97.0.

Part of #314
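
As a rough sketch of how a worker pool would consume the changed module
interface (the module path and every value below are illustrative assumptions,
not taken from this commit):

module "worker-pool" {
  # Assumed path; the workers module lives under the azure/flatcar-linux tree.
  source = "./assets/terraform-modules/azure/flatcar-linux/kubernetes/workers"

  # New inputs: the cluster name and DNS zone are used to build the
  # "<cluster_name>-private.<dns_zone>" record that /opt/wait-for-dns polls.
  cluster_name = "mycluster"
  dns_zone     = "example.com"

  # New optional inputs: rendered into the kubelet --node-labels and
  # --register-with-taints flags.
  labels = { "node.kubernetes.io/role" = "backend" }
  taints = { "dedicated" = "backend:NoSchedule" }

  pool_name = "pool-1"
  # Other pre-existing inputs (backend_address_pool_id, ...) omitted here.
}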

Signed-off-by: Kautilya Tripathi <kautilya@kinvolk.io>
knrt10 committed Feb 24, 2022
1 parent f11ad5f commit aa33547
Showing 15 changed files with 952 additions and 89 deletions.
@@ -8,7 +8,8 @@ module "bootkube" {

cluster_name = var.cluster_name
api_servers = [local.api_server]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
etcd_servers = [for i, d in azurerm_linux_virtual_machine.controllers : format("%s-etcd%d.%s", var.cluster_name, i, var.dns_zone)]
etcd_endpoints = azurerm_linux_virtual_machine.controllers.*.private_ip_address
asset_dir = var.asset_dir
controller_count = var.controller_count
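
For illustration only (cluster name, zone, and controller count are assumed),
the new etcd_servers expression evaluates to entries of the form
"<cluster_name>-etcd<index>.<dns_zone>", e.g.:

etcd_servers = [
  "mycluster-etcd0.example.com",
  "mycluster-etcd1.example.com",
  "mycluster-etcd2.example.com",
]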

@@ -54,7 +54,7 @@ systemd:
RestartSec=5s
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done; /opt/wait-for-dns ${dns_zone} ${cluster_name}-private 3600'
[Install]
RequiredBy=kubelet.service etcd.service bootkube.service
- name: kubelet.service
@@ -271,6 +271,64 @@ storage:
kind: KubeletConfiguration
cgroupDriver: "$${docker_cgroup_driver}"
EOF
- path: /opt/wait-for-dns
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash
if [[ $# -ne 3 ]]; then
echo "Usage: $0 <zone> <record> <max_attempts>"
exit 1
fi
zone=$1
record=$2
max_attempts=$3
echo "Figuring out the nameservers for $zone"
nameservers=""
counter=0
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 "$zone" ns)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
nameservers=$out
break
fi
if [[ "$out" = "" ]]; then
echo "No nameservers found for $zone"
else
echo "dig failed with exit code $ret: $out"
fi
sleep 1
counter=$((counter+1))
done
if [[ "$nameservers" == "" ]]; then
echo "Could not resolve nameservers for $zone"
exit 1
fi
for ns in $nameservers; do
echo "Polling $ns for $record.$zone..."
counter=0
ok=false
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 @"$ns" "$record"."$zone" a)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
echo "Looks good!"
ok=true
break
fi
echo "Not available yet"
sleep 1
counter=$((counter+1))
done
if ! $ok; then
echo "$record.$zone didn't become available within the allowed time"
exit 1
fi
done
echo "$record.$zone is available on all nameservers"
exit 0
- path: /etc/docker/daemon.json
filesystem: root
mode: 0500
@@ -1,25 +1,3 @@
# Azure resource group for DNS zone
# DNS zone for clusters
resource "azurerm_dns_zone" "clusters" {
resource_group_name = azurerm_resource_group.cluster.name
name = var.dns_zone
}

# Discrete DNS records for each controller's private IPv4 for etcd usage
resource "azurerm_dns_a_record" "etcds" {
count = var.controller_count
resource_group_name = var.dns_zone_group

# DNS Zone name where record should be created
zone_name = var.dns_zone

# DNS record
name = format("%s-etcd%d", var.cluster_name, count.index)
ttl = 300

# private IPv4 address for etcd
records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
}

locals {
# Container Linux derivative
@@ -161,6 +139,7 @@ data "template_file" "controller-configs" {
template = file("${path.module}/cl/controller.yaml.tmpl")

vars = {
cluster_name = var.cluster_name
# Cannot use cyclic dependencies on controllers or their DNS records
etcd_name = "etcd${count.index}"
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"
@@ -171,6 +150,7 @@
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
enable_tls_bootstrap = var.enable_tls_bootstrap
dns_zone = var.dns_zone
}
}

20 changes: 2 additions & 18 deletions assets/terraform-modules/azure/flatcar-linux/kubernetes/lb.tf
@@ -1,18 +1,3 @@
# DNS record for the apiserver load balancer
resource "azurerm_dns_a_record" "apiserver" {
resource_group_name = var.dns_zone_group

# DNS Zone name where record should be created
zone_name = var.dns_zone

# DNS record
name = var.cluster_name
ttl = 300

# IPv4 address of apiserver load balancer
records = [azurerm_public_ip.apiserver-ipv4.ip_address]
}

# Static IPv4 address for the apiserver frontend
resource "azurerm_public_ip" "apiserver-ipv4" {
resource_group_name = azurerm_resource_group.cluster.name
@@ -114,9 +99,8 @@ resource "azurerm_lb_outbound_rule" "worker-outbound" {
# Address pool of controllers
resource "azurerm_lb_backend_address_pool" "controller" {
resource_group_name = azurerm_resource_group.cluster.name

name = "controller"
loadbalancer_id = azurerm_lb.cluster.id
name = "controller"
loadbalancer_id = azurerm_lb.cluster.id
}

# Address pool of workers
@@ -98,3 +98,11 @@ output "bootstrap-secrets_values" {
output "node-local-dns_values" {
value = module.bootkube.node-local-dns_values
}

output "controllers_public_ipv4" {
value = azurerm_linux_virtual_machine.controllers.*.public_ip_address
}

output "controllers_private_ipv4" {
value = azurerm_linux_virtual_machine.controllers.*.private_ip_address
}
@@ -6,7 +6,7 @@ resource "null_resource" "copy-controller-secrets" {

connection {
type = "ssh"
host = azurerm_public_ip.controllers[count.index].ip_address
host = azurerm_linux_virtual_machine.controllers[count.index].public_ip_address
user = "core"
timeout = "15m"
}
@@ -96,13 +96,12 @@ resource "null_resource" "copy-controller-secrets" {
resource "null_resource" "bootkube-start" {
depends_on = [
module.bootkube,
azurerm_dns_a_record.apiserver,
null_resource.copy-controller-secrets,
]

connection {
type = "ssh"
host = azurerm_public_ip.controllers[0].ip_address
host = azurerm_linux_virtual_machine.controllers[0].public_ip_address
user = "core"
timeout = "15m"
}
@@ -12,13 +12,13 @@ variable "region" {

variable "dns_zone" {
type = string
description = "Azure DNS Zone (e.g. azure.example.com)"
description = "DNS Zone (e.g. example.com)"
}

variable "dns_zone_group" {
type = string
description = "Resource group where the Azure DNS Zone resides (e.g. global)"
}
# variable "dns_zone_group" {
# type = string
# description = "Resource group where the Azure DNS Zone resides (e.g. global)"
# }

# variable "custom_image_resource_group_name" {
# type = string
@@ -10,7 +10,7 @@ terraform {
}
azurerm = {
source = "hashicorp/azurerm"
version = "2.92.0"
version = "2.97.0"
}
null = {
source = "hashicorp/null"
@@ -24,7 +24,7 @@ systemd:
RestartSec=5s
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done; /opt/wait-for-dns ${dns_zone} ${cluster_name}-private 3600'
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
@@ -83,6 +83,7 @@ systemd:
--node-labels=$${NODE_LABELS} \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=$${NODE_TAINTS} \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
ExecStart=docker logs -f kubelet
ExecStop=docker stop kubelet
@@ -118,10 +119,10 @@ storage:
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=docker://quay.io/kinvolk/kubelet
KUBELET_IMAGE_URL=quay.io/kinvolk/kubelet
KUBELET_IMAGE_TAG=v1.21.4
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
NODE_LABELS="${join(",", [for k, v in node_labels : "${k}=${v}"])}"
NODE_TAINTS="${join(",", [for k, v in taints : "${k}=${v}"])}"
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
@@ -147,6 +148,64 @@
cpu: ${kube_reserved_cpu}
%{~ endif ~}
EOF
- path: /opt/wait-for-dns
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash
if [[ $# -ne 3 ]]; then
echo "Usage: $0 <zone> <record> <max_attempts>"
exit 1
fi
zone=$1
record=$2
max_attempts=$3
echo "Figuring out the nameservers for $zone"
nameservers=""
counter=0
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 "$zone" ns)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
nameservers=$out
break
fi
if [[ "$out" = "" ]]; then
echo "No nameservers found for $zone"
else
echo "dig failed with exit code $ret: $out"
fi
sleep 1
counter=$((counter+1))
done
if [[ "$nameservers" == "" ]]; then
echo "Could not resolve nameservers for $zone"
exit 1
fi
for ns in $nameservers; do
echo "Polling $ns for $record.$zone..."
counter=0
ok=false
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 @"$ns" "$record"."$zone" a)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
echo "Looks good!"
ok=true
break
fi
echo "Not available yet"
sleep 1
counter=$((counter+1))
done
if ! $ok; then
echo "$record.$zone didn't become available within the allowed time"
exit 1
fi
done
echo "$record.$zone is available on all nameservers"
exit 0
- path: /etc/kubernetes/delete-node
filesystem: root
mode: 0744
@@ -1,3 +1,8 @@
variable "cluster_name" {
type = string
description = "Unique cluster name (prepended to dns_zone)"
}

variable "pool_name" {
type = string
description = "Unique name for the worker pool"
@@ -30,6 +35,24 @@ variable "backend_address_pool_id" {
description = "Must be set to the `worker_backend_address_pool_id` output by cluster"
}

variable "dns_zone" {
type = string
description = "DNS Zone (e.g. example.com)"
}

variable "labels" {
type = map(string)
description = "Map of custom labels for worker nodes."
default = {}
}

variable "taints" {
type = map(string)
default = {}
description = "Map of custom taints for worker nodes."
}


# variable "custom_image_resource_group_name" {
# type = string
# description = "The name of the Resource Group in which the Custom Image exists."
@@ -8,7 +8,7 @@ terraform {
}
azurerm = {
source = "hashicorp/azurerm"
version = "2.92.0"
version = "2.97.0"
}
template = {
source = "hashicorp/template"
@@ -99,16 +99,7 @@ resource "azurerm_monitor_autoscale_setting" "workers" {

# Worker Ignition configs
data "ct_config" "worker-ignition" {
content = data.template_file.worker-config.rendered
pretty_print = false
snippets = var.clc_snippets
}

# Worker Container Linux configs
data "template_file" "worker-config" {
template = file("${path.module}/cl/worker.yaml.tmpl")

vars = {
content = templatefile("${path.module}/cl/worker.yaml.tmpl", {
kubeconfig = var.enable_tls_bootstrap ? indent(10, templatefile("${path.module}/cl/bootstrap-kubeconfig.yaml.tmpl", {
token_id = random_string.bootstrap_token_id[0].result
token_secret = random_string.bootstrap_token_secret[0].result
@@ -122,5 +113,11 @@
cpu_manager_policy = var.cpu_manager_policy
system_reserved_cpu = var.system_reserved_cpu
kube_reserved_cpu = var.kube_reserved_cpu
}
node_labels = merge({ "node.kubernetes.io/node" = "" }, var.labels)
taints = var.taints
dns_zone = var.dns_zone
cluster_name = var.cluster_name
})
pretty_print = false
snippets = var.clc_snippets
}