Skip to content
This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

Commit

Permalink
Add tinkerbell platform
Browse files Browse the repository at this point in the history
This commit adds Tinkerbell as a supported platform for Lokomotive.

The Terraform code consumes newly introduced controller and worker
Terraform modules, which reduces the amount of code required for
introducing this new platform.

The commit currently lacks several parts, which will be added at a later
stage:
- Unit tests
- Configuration validation rules
- CI implementation
- Reference documentation
- Quick start guide

Closes #382.

Signed-off-by: Mateusz Gozdek <mateusz@kinvolk.io>
  • Loading branch information
invidian committed May 13, 2020
1 parent 48d6f5e commit 4fdf684
Show file tree
Hide file tree
Showing 18 changed files with 1,029 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
#
# Generates the cluster PKI, kubeconfigs and bootstrap manifests into
# var.asset_dir; these are later copied to the controllers by ssh.tf.
module "bootkube" {
  source = "../../bootkube"

  cluster_name = var.cluster_name
  # API server is expected to resolve at <cluster_name>.<dns_zone>.
  api_servers  = [format("%s.%s", var.cluster_name, var.dns_zone)]
  # etcd endpoints come from the controller module (one per controller node).
  etcd_servers = module.controller.etcd_servers
  asset_dir    = var.asset_dir
  network_mtu  = var.network_mtu
  pod_cidr     = var.pod_cidr
  service_cidr = var.service_cidr

  cluster_domain_suffix = var.cluster_domain_suffix
  enable_reporting      = var.enable_reporting
  enable_aggregation    = var.enable_aggregation

  certs_validity_period_hours = var.certs_validity_period_hours
}
54 changes: 54 additions & 0 deletions assets/lokomotive-kubernetes/terraform-modules/tinkerbell/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Shared controller module: produces per-node Container Linux Configs
# (module.controller.clc_configs, consumed by tinkerbell_template below)
# and the etcd server list consumed by the bootkube module.
module "controller" {
  source = "../controller"

  cluster_name           = var.cluster_name
  dns_zone               = var.dns_zone
  node_count             = var.node_count
  cluster_dns_service_ip = module.bootkube.cluster_dns_service_ip
  ssh_keys               = var.ssh_keys
  clc_snippets           = var.clc_snippets

  bootkube_rkt_extra_args = [
    # So /etc/hosts changes passed via CLC snippets have effect in bootkube rkt container.
    # This allows to workaround a requirement of DNS server resolving etcd DNS names etc.
    "--hosts-entry=host",
  ]

  # Per-node CLC snippet; the %d placeholder is presumably formatted with
  # the node index by the controller module, giving each machine a unique
  # hostname — TODO confirm against the controller module.
  clc_snippet_index = <<EOF
storage:
  files:
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline: |
          controller%d
EOF
}

# One Tinkerbell workflow template per controller node, rendered from
# templates/flatcar-install.tmpl with this node's Ignition (CLC) config
# and install parameters baked in.
resource "tinkerbell_template" "main" {
  count = var.node_count

  name = "${var.cluster_name}-controller-${count.index}"

  content = templatefile("${path.module}/templates/flatcar-install.tmpl", {
    # Per-node CLC config produced by the controller module.
    ignition_config          = module.controller.clc_configs[count.index]
    flatcar_install_base_url = var.flatcar_install_base_url
    # Machine name; the template uses it to look up the node's IP in .Targets.
    machine                  = "${var.cluster_name}-controller-${count.index}"
    os_version               = var.os_version
    os_channel               = var.os_channel
  })
}

# One Tinkerbell target per controller node, mapping the machine name used
# in tinkerbell_template to the node's IPv4 address so the template's
# `worker:` lookup ({{index .Targets ...}}) resolves.
resource "tinkerbell_target" "main" {
  count = var.node_count

  # jsonencode guarantees syntactically valid JSON and proper escaping of
  # the interpolated cluster name, unlike hand-built heredoc interpolation.
  data = jsonencode({
    targets = {
      "${var.cluster_name}-controller-${count.index}" = {
        ipv4_addr = var.ip_addresses[count.index]
      }
    }
  })
}

# One workflow per controller node, pairing the node's target (hardware
# address data) with its rendered install template.
resource "tinkerbell_workflow" "main" {
  count = var.node_count

  target   = tinkerbell_target.main[count.index].id
  template = tinkerbell_template.main[count.index].id
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Admin kubeconfig for operating the cluster.
output "kubeconfig-admin" {
  value = module.bootkube.kubeconfig-admin
}

# values.yaml content for all deployed charts.
output "pod-checkpointer_values" {
  value = module.bootkube.pod-checkpointer_values
}

output "kube-apiserver_values" {
  value = module.bootkube.kube-apiserver_values
}

output "kubernetes_values" {
  value = module.bootkube.kubernetes_values
}

output "kubelet_values" {
  value = module.bootkube.kubelet_values
}

output "calico_values" {
  value = module.bootkube.calico_values
}

# Kubelet (bootstrap) kubeconfig, distinct from the admin one above.
output "kubeconfig" {
  value = module.bootkube.kubeconfig-kubelet
}

output "cluster_dns_service_ip" {
  value = module.bootkube.cluster_dns_service_ip
}
97 changes: 97 additions & 0 deletions assets/lokomotive-kubernetes/terraform-modules/tinkerbell/ssh.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# Secure copy etcd TLS assets and kubeconfig to controllers. Activates kubelet.service
resource "null_resource" "copy-controller-secrets" {
  # One provisioning run per controller machine.
  count = length(var.ip_addresses)

  connection {
    type = "ssh"
    host = var.ip_addresses[count.index]
    user = "core"
    # NOTE(review): long timeout presumably waits out the Tinkerbell
    # install-and-reboot workflow before SSH becomes reachable — confirm.
    timeout = "60m"
  }

  # Stage kubeconfig and etcd TLS material in $HOME; the remote-exec step
  # below moves everything into place with root ownership.
  provisioner "file" {
    content     = module.bootkube.kubeconfig-kubelet
    destination = "$HOME/kubeconfig"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_ca_cert
    destination = "$HOME/etcd-client-ca.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_client_cert
    destination = "$HOME/etcd-client.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_client_key
    destination = "$HOME/etcd-client.key"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_server_cert
    destination = "$HOME/etcd-server.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_server_key
    destination = "$HOME/etcd-server.key"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_peer_cert
    destination = "$HOME/etcd-peer.crt"
  }

  provisioner "file" {
    content     = module.bootkube.etcd_peer_key
    destination = "$HOME/etcd-peer.key"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mkdir -p /etc/ssl/etcd/etcd",
      "sudo mv etcd-client* /etc/ssl/etcd/",
      # The same CA signs client, server and peer certs, so it is copied
      # (not moved) into both the server-ca and peer-ca locations.
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/server-ca.crt",
      "sudo mv etcd-server.crt /etc/ssl/etcd/etcd/server.crt",
      "sudo mv etcd-server.key /etc/ssl/etcd/etcd/server.key",
      "sudo cp /etc/ssl/etcd/etcd-client-ca.crt /etc/ssl/etcd/etcd/peer-ca.crt",
      "sudo mv etcd-peer.crt /etc/ssl/etcd/etcd/peer.crt",
      "sudo mv etcd-peer.key /etc/ssl/etcd/etcd/peer.key",
      # Restrict the whole tree to the etcd user, read-only.
      "sudo chown -R etcd:etcd /etc/ssl/etcd",
      "sudo chmod -R 500 /etc/ssl/etcd",
      "sudo mv $HOME/kubeconfig /etc/kubernetes/kubeconfig",
    ]
  }
}

# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
  # Without depends_on, this remote-exec may start before the kubeconfig copy.
  # Terraform only does one task at a time, so it would try to bootstrap
  # while no Kubelets are running.
  depends_on = [
    null_resource.copy-controller-secrets,
  ]

  connection {
    type = "ssh"
    # Bootstrapping runs on the first controller only.
    host    = var.ip_addresses[0]
    user    = "core"
    timeout = "15m"
  }

  # Upload the assets generated by the bootkube module.
  provisioner "file" {
    source      = var.asset_dir
    destination = "$HOME/assets"
  }

  provisioner "remote-exec" {
    inline = [
      "sudo mv $HOME/assets /opt/bootkube",
      "sudo systemctl start bootkube",
    ]
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Tinkerbell workflow template: installs Flatcar Container Linux on a target
# machine, then reboots it into the installed OS.
#
# Two template layers are at play here: ${...} and %{...} directives are
# rendered by Terraform's templatefile() (see main.tf), while {{...}} is
# evaluated by Tinkerbell when the workflow runs.
version: '0.1'
name: flatcar-install
global_timeout: 1800
tasks:
  - name: "flatcar-install"
    # Resolve the worker's IP from the target data (see tinkerbell_target).
    worker: "{{index .Targets "${machine}" "ipv4_addr"}}"
    volumes:
      - /dev:/dev
      - /statedir:/statedir
    actions:
      # Write the node's Ignition config to the shared state dir, decoded
      # from the base64 payload embedded at render time.
      - name: "dump-ignition"
        image: alpine # TODO docs: This image must be pushed to Tinkerbell registry.
        command:
          - sh
          - -c
          - echo '${base64encode(ignition_config)}' | base64 -d > /statedir/ignition.json
      - name: "flatcar-install"
        image: flatcar-install # TODO docs: This image must be pushed to Tinkerbell registry.
        command:
          - -s # Experimentally use the smallest disk to install the OS.
          %{~ if os_version != "" ~}
          - -V
          - ${os_version}
          %{~ endif ~}
          %{~ if os_channel != "" ~}
          - -C
          - ${os_channel}
          %{~ endif ~}
          - -i
          - /statedir/ignition.json
          %{~ if flatcar_install_base_url != "" ~}
          - -b
          - ${flatcar_install_base_url}
          %{~ endif ~}
      - name: "reboot" # This task shouldn't really be there, but there is no other way to reboot the worker into target OS in Tinkerbell for now.
        image: alpine
        command:
          - sh
          - -c
          # Force an immediate reboot via magic sysrq, bypassing init.
          - 'echo 1 > /proc/sys/kernel/sysrq; echo b > /proc/sysrq-trigger'
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
# Required variables.

variable "dns_zone" {
  type        = string
  description = "DNS zone in which the cluster API endpoint <cluster_name>.<dns_zone> is expected to resolve."
}

variable "cluster_name" {
  type        = string
  description = "Cluster name. Used as a prefix for machine, template, target and workflow names."
}

variable "ip_addresses" {
  type        = list(string)
  description = "IPv4 addresses of the controller machines. Used as Tinkerbell target addresses and as SSH hosts for copying cluster secrets and bootstrap assets."
}

variable "asset_dir" {
  description = "Path to a directory where generated assets should be placed (contains secrets)"
  type        = string
}

# Optional variables.

variable "flatcar_install_base_url" {
  type        = string
  description = "Custom base URL for Flatcar Container Linux images, passed to flatcar-install via -b. Empty string uses the installer's default."
  default     = ""
}

variable "os_version" {
  type        = string
  description = "Flatcar Container Linux version to install, passed to flatcar-install via -V. Empty string uses the installer's default."
  default     = ""
}

variable "os_channel" {
  type        = string
  description = "Flatcar Container Linux release channel to install, passed to flatcar-install via -C. Empty string uses the installer's default."
  default     = ""
}

variable "ssh_keys" {
  type        = list(string)
  description = "List of SSH public keys for user `core`. Each element must be specified in a valid OpenSSH public key format, as defined in RFC 4253 Section 6.6, e.g. 'ssh-rsa AAAAB3N...'."
  default     = []
}

variable "node_count" {
  type        = number
  description = "Number of nodes to create."
  default     = 1
}

variable "clc_snippets" {
  type        = list(string)
  description = "Extra CLC snippets to include in the configuration."
  default     = []
}

variable "cluster_domain_suffix" {
  type        = string
  description = "Cluster domain suffix. Passed to kubelet as --cluster_domain flag."
  default     = "cluster.local"
}

variable "network_mtu" {
  description = "CNI interface MTU"
  type        = number
  default     = 1500
}

variable "pod_cidr" {
  description = "CIDR IP range to assign Kubernetes pods"
  type        = string
  default     = "10.2.0.0/16"
}

variable "service_cidr" {
  description = <<EOF
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
EOF
  type        = string
  default     = "10.3.0.0/24"
}

variable "enable_reporting" {
  type        = bool
  description = "Enable usage or analytics reporting to upstream component owners (Tigera: Calico)"
  default     = false
}

variable "certs_validity_period_hours" {
  description = "Validity of all the certificates in hours"
  type        = number
  default     = 8760
}

variable "enable_aggregation" {
  # Description previously claimed "defaults to false", contradicting the
  # actual default of true below.
  description = "Enable the Kubernetes Aggregation Layer (defaults to true)"
  type        = bool
  default     = true
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Terraform version and plugin versions

terraform {
  required_providers {
    # NOTE(review): only the null provider (used by ssh.tf) is constrained
    # here; the tinkerbell provider used in main.tf has no version
    # constraint — confirm whether it should be pinned as well.
    null = "~> 2.1"
  }
}
Loading

0 comments on commit 4fdf684

Please sign in to comment.