Skip to content
This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

Commit

Permalink
azure: update assets directory
Browse files Browse the repository at this point in the history
Updates to the new version, with the corresponding file changes for that
version.

Part of #314

Signed-off-by: Kautilya Tripathi <kautilya@kinvolk.io>
  • Loading branch information
knrt10 committed Jan 31, 2022
1 parent f50166a commit b8bd858
Show file tree
Hide file tree
Showing 22 changed files with 621 additions and 218 deletions.
32 changes: 22 additions & 10 deletions assets/terraform-modules/azure/flatcar-linux/kubernetes/bootkube.tf
Original file line number Diff line number Diff line change
@@ -1,22 +1,34 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
locals {
api_server = format("%s.%s", var.cluster_name, var.dns_zone)
}

module "bootkube" {
source = "../../../bootkube"

cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
asset_dir = var.asset_dir
cluster_name = var.cluster_name
api_servers = [local.api_server]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
asset_dir = var.asset_dir
controller_count = var.controller_count

networking = var.networking

network_encapsulation = "vxlan"

# we should be able to use 1450 MTU, but in practice, 1410 was needed
network_mtu = "1410"

pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation

conntrack_max_per_core = var.conntrack_max_per_core
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
cluster_domain_suffix = var.cluster_domain_suffix
bootstrap_tokens = var.enable_tls_bootstrap ? concat([local.controller_bootstrap_token], var.worker_bootstrap_tokens) : []
enable_tls_bootstrap = var.enable_tls_bootstrap
enable_reporting = var.enable_reporting
enable_aggregation = var.enable_aggregation
encrypt_pod_traffic = var.encrypt_pod_traffic
# Disable the self hosted kubelet.
disable_self_hosted_kubelet = var.disable_self_hosted_kubelet
certs_validity_period_hours = var.certs_validity_period_hours
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Bootstrap token for the controller node, generated only when TLS
# bootstrapping is enabled; falls back to an empty object so that a
# downstream concat() over bootstrap tokens stays valid when disabled.
locals {
# token_id / token_secret come from the random_string resources below,
# which are themselves count-gated on var.enable_tls_bootstrap, so the
# [0] index is only evaluated when the resources exist.
controller_bootstrap_token = var.enable_tls_bootstrap ? {
token_id = random_string.bootstrap_token_id[0].result
token_secret = random_string.bootstrap_token_secret[0].result
} : {}
}

# Generate a cryptographically random token id (public).
# Created only when TLS bootstrapping is enabled; consumed via
# random_string.bootstrap_token_id[0].result in locals.
resource "random_string" "bootstrap_token_id" {
  # var.enable_tls_bootstrap is a boolean, so comparing it to `true`
  # is redundant — use it directly as the condition.
  count = var.enable_tls_bootstrap ? 1 : 0

  # 6 lowercase alphanumeric characters, matching the kubeadm
  # bootstrap-token id format ([a-z0-9]{6}).
  length  = 6
  upper   = false
  special = false
}

# Generate a cryptographically random token secret.
# Created only when TLS bootstrapping is enabled; consumed via
# random_string.bootstrap_token_secret[0].result in locals.
# NOTE(review): random_password would mark this value sensitive in
# state/plan output — TODO confirm state-migration impact before switching.
resource "random_string" "bootstrap_token_secret" {
  # var.enable_tls_bootstrap is a boolean, so comparing it to `true`
  # is redundant — use it directly as the condition.
  count = var.enable_tls_bootstrap ? 1 : 0

  # 16 lowercase alphanumeric characters, matching the kubeadm
  # bootstrap-token secret format ([a-z0-9]{16}).
  length  = 16
  upper   = false
  special = false
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,31 +2,32 @@
systemd:
units:
- name: etcd-member.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="IMAGE_TAG=v3.4.16"
Environment="IMAGE_URL=docker://quay.io/coreos/etcd"
Environment="RKT_RUN_ARGS=--insecure-options=image"
Environment="SSL_DIR=/etc/ssl/etcd"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
Environment="ETCD_CLIENT_CERT_AUTH=true"
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
enabled: true
contents: |
[Unit]
Description=etcd (System Container)
Documentation=https://github.com/etcd-io/etcd
Requires=docker.service
After=docker.service
[Service]
Environment=ETCD_IMAGE=quay.io/coreos/etcd:v3.5.1
ExecStartPre=/usr/bin/docker run -d \
--name etcd \
--network host \
--env-file /etc/etcd/etcd.env \
--user 232:232 \
--volume /etc/ssl/etcd:/etc/ssl/certs:ro \
--volume /var/lib/etcd:/var/lib/etcd:rw \
$${ETCD_IMAGE}
ExecStart=docker logs -f etcd
ExecStop=docker stop etcd
ExecStopPost=docker rm etcd
Restart=always
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
- name: docker.service
enable: true
- name: locksmithd.service
Expand Down Expand Up @@ -88,9 +89,14 @@ systemd:
--cluster_domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/cni/net.d \
--exit-on-lock-contention \
%{~ if enable_tls_bootstrap ~}
--kubeconfig=/var/lib/kubelet/kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
--rotate-certificates \
%{~ else ~}
--kubeconfig=/etc/kubernetes/kubeconfig \
%{~ endif ~}
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=$${NODE_LABELS} \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
Expand Down Expand Up @@ -137,6 +143,28 @@ storage:
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /etc/etcd/etcd.env
filesystem: root
mode: 0644
contents:
inline: |
ETCD_NAME=${etcd_name}
ETCD_DATA_DIR=/var/lib/etcd
ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380
ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381
ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}
ETCD_STRICT_RECONFIG_CHECK=true
ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt
ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt
ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key
ETCD_CLIENT_CERT_AUTH=true
ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt
ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt
ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key
ETCD_PEER_CLIENT_CERT_AUTH=true
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
Expand Down Expand Up @@ -164,6 +192,18 @@ storage:
--dns=host \
--hosts-entry=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
- path: /etc/docker/daemon.json
filesystem: root
mode: 0500
contents:
inline: |
{
"live-restore": true,
"log-opts": {
"max-size": "100m",
"max-file": "3"
}
}
passwd:
users:
- name: core
Expand Down
104 changes: 56 additions & 48 deletions assets/terraform-modules/azure/flatcar-linux/kubernetes/controllers.tf
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,12 @@ resource "azurerm_dns_a_record" "etcds" {
ttl = 300

# private IPv4 address for etcd
records = [azurerm_network_interface.controllers[count.index].private_ip_address]
records = [azurerm_network_interface.controllers.*.private_ip_address[count.index]]
}

locals {
# Channel for a CoreOS Container Linux derivative
# coreos-stable -> CoreOS Container Linux Stable
# Container Linux derivative
# flatcar-stable -> Flatcar Linux Stable
channel = split("-", var.os_image)[1]
}

Expand All @@ -31,64 +31,64 @@ resource "azurerm_availability_set" "controllers" {
managed = true
}

data "azurerm_image" "custom" {
name = var.custom_image_name
resource_group_name = var.custom_image_resource_group_name
}
# data "azurerm_image" "custom" {
# name = var.custom_image_name
# resource_group_name = var.custom_image_resource_group_name
# }

# Controller instances
resource "azurerm_virtual_machine" "controllers" {
resource "azurerm_linux_virtual_machine" "controllers" {
count = var.controller_count
resource_group_name = azurerm_resource_group.cluster.name

name = "${var.cluster_name}-controller-${count.index}"
location = var.region
availability_set_id = azurerm_availability_set.controllers.id
vm_size = var.controller_type

# boot
storage_image_reference {
id = data.azurerm_image.custom.id
}
size = var.controller_type

# storage
storage_os_disk {
name = "${var.cluster_name}-controller-${count.index}"
create_option = "FromImage"
caching = "ReadWrite"
disk_size_gb = var.disk_size
os_type = "Linux"
managed_disk_type = "Premium_LRS"
os_disk {
name = "${var.cluster_name}-controller-${count.index}"
caching = "None"
disk_size_gb = var.disk_size
storage_account_type = "Premium_LRS"
}

# network
network_interface_ids = [azurerm_network_interface.controllers[count.index].id]
# Flatcar Container Linux
source_image_reference {
publisher = "Kinvolk"
offer = "flatcar-container-linux-free"
sku = local.channel
version = "latest"
}

os_profile {
computer_name = "${var.cluster_name}-controller-${count.index}"
admin_username = "core"
custom_data = data.ct_config.controller-ignitions[count.index].rendered
plan {
name = local.channel
publisher = "kinvolk"
product = "flatcar-container-linux-free"
}

# network
network_interface_ids = [
azurerm_network_interface.controllers.*.id[count.index]
]

# Azure requires setting admin_ssh_key, though Ignition custom_data handles it too
computer_name = "${var.cluster_name}-controller-${count.index}"
custom_data = base64encode(data.ct_config.controller-ignitions.*.rendered[count.index])
admin_username = "core"
admin_ssh_key {
username = "core"
public_key = var.ssh_keys[0]
}
# Azure mandates setting an ssh_key, provide just a single key as the
# others are handled with Ignition custom_data.
os_profile_linux_config {
disable_password_authentication = true

ssh_keys {
path = "/home/core/.ssh/authorized_keys"
key_data = var.ssh_keys[0]
}
}

# lifecycle
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true

lifecycle {
ignore_changes = [
storage_os_disk,
os_profile,
custom_data,
os_disk,
]
}
}
Expand All @@ -98,21 +98,28 @@ resource "azurerm_network_interface" "controllers" {
count = var.controller_count
resource_group_name = azurerm_resource_group.cluster.name

name = "${var.cluster_name}-controller-${count.index}"
location = azurerm_resource_group.cluster.location
network_security_group_id = azurerm_network_security_group.controller.id
name = "${var.cluster_name}-controller-${count.index}"
location = azurerm_resource_group.cluster.location

ip_configuration {
name = "ip0"
subnet_id = azurerm_subnet.controller.id
private_ip_address_allocation = "dynamic"

# public IPv4
public_ip_address_id = azurerm_public_ip.controllers[count.index].id
private_ip_address_allocation = "Dynamic"
# instance public IPv4
public_ip_address_id = azurerm_public_ip.controllers.*.id[count.index]
}
}

# Add controller NICs to the controller backend address pool
# Associate controller network interface with controller security group
# (replaces the removed network_security_group_id NIC attribute; one
# association per controller NIC, indexed by count).
resource "azurerm_network_interface_security_group_association" "controllers" {
count = var.controller_count

network_interface_id = azurerm_network_interface.controllers[count.index].id
network_security_group_id = azurerm_network_security_group.controller.id
}


# Associate controller network interface with controller backend address pool
resource "azurerm_network_interface_backend_address_pool_association" "controllers" {
count = var.controller_count

Expand Down Expand Up @@ -156,6 +163,7 @@ data "template_file" "controller-configs" {
ssh_keys = jsonencode(var.ssh_keys)
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
cluster_domain_suffix = var.cluster_domain_suffix
enable_tls_bootstrap = var.enable_tls_bootstrap
}
}

Expand Down
15 changes: 15 additions & 0 deletions assets/terraform-modules/azure/flatcar-linux/kubernetes/lb.tf
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,21 @@ resource "azurerm_lb_rule" "ingress-https" {
probe_id = azurerm_lb_probe.ingress.id
}

# Worker outbound TCP/UDP SNAT
# Gives workers in the load balancer's backend pool outbound internet
# connectivity via SNAT through the LB's "ingress" frontend IP.
resource "azurerm_lb_outbound_rule" "worker-outbound" {
resource_group_name = azurerm_resource_group.cluster.name

name = "worker"
loadbalancer_id = azurerm_lb.cluster.id
# Reuses the existing "ingress" frontend IP configuration for SNAT.
frontend_ip_configuration {
name = "ingress"
}

# "All" covers both TCP and UDP outbound flows.
protocol = "All"
backend_address_pool_id = azurerm_lb_backend_address_pool.worker.id
}


# Address pool of controllers
resource "azurerm_lb_backend_address_pool" "controller" {
resource_group_name = azurerm_resource_group.cluster.name
Expand Down
10 changes: 10 additions & 0 deletions assets/terraform-modules/azure/flatcar-linux/kubernetes/network.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,20 @@ resource "azurerm_subnet" "controller" {
address_prefix = cidrsubnet(var.host_cidr, 1, 0)
}

# Attach the controller security group to the controller subnet
# (standalone association resource, per current azurerm provider style).
resource "azurerm_subnet_network_security_group_association" "controller" {
subnet_id = azurerm_subnet.controller.id
network_security_group_id = azurerm_network_security_group.controller.id
}

resource "azurerm_subnet" "worker" {
resource_group_name = azurerm_resource_group.cluster.name

name = "worker"
virtual_network_name = azurerm_virtual_network.network.name
address_prefix = cidrsubnet(var.host_cidr, 1, 1)
}

# Attach the worker security group to the worker subnet
# (standalone association resource, per current azurerm provider style).
resource "azurerm_subnet_network_security_group_association" "worker" {
subnet_id = azurerm_subnet.worker.id
network_security_group_id = azurerm_network_security_group.worker.id
}

0 comments on commit b8bd858

Please sign in to comment.