From adaafedb386518d759894fb59ffd9a89be832b7e Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Mon, 3 Feb 2020 16:16:06 +0100 Subject: [PATCH] Move terraform-render-bootkube module into bootkube directory Based on terraform-render-bootkube@1b49787f016ce299833d2544b6b983d3f7ea4155. Signed-off-by: Mateusz Gozdek --- Makefile | 4 - aws/flatcar-linux/kubernetes/bootkube.tf | 2 +- azure/flatcar-linux/kubernetes/bootkube.tf | 2 +- .../flatcar-linux/kubernetes/bootkube.tf | 2 +- bootkube/.gitignore | 4 + bootkube/LICENSE | 21 ++ bootkube/README.md | 49 +++++ bootkube/assets.tf | 136 ++++++++++++ bootkube/conditional.tf | 111 ++++++++++ bootkube/outputs.tf | 67 ++++++ .../bootstrap-apiserver.yaml | 56 +++++ .../bootstrap-controller-manager.yaml | 40 ++++ .../bootstrap-scheduler.yaml | 25 +++ bootkube/resources/charts/calico.yaml | 11 + bootkube/resources/charts/calico/.helmignore | 22 ++ bootkube/resources/charts/calico/Chart.yaml | 18 ++ .../charts/calico/crds/bgpconfigurations.yaml | 12 ++ .../charts/calico/crds/bgppeers.yaml | 12 ++ .../charts/calico/crds/blockaffinities.yaml | 12 ++ .../calico/crds/clusterinformations.yaml | 12 ++ .../calico/crds/felixconfigurations.yaml | 12 ++ .../calico/crds/globalnetworkpolicies.yaml | 12 ++ .../charts/calico/crds/globalnetworksets.yaml | 12 ++ .../charts/calico/crds/hostendpoints.yaml | 12 ++ .../charts/calico/crds/ipamblocks.yaml | 12 ++ .../charts/calico/crds/ipamconfigs.yaml | 12 ++ .../charts/calico/crds/ipamhandles.yaml | 12 ++ .../resources/charts/calico/crds/ippools.yaml | 12 ++ .../charts/calico/crds/networkpolicies.yaml | 12 ++ .../charts/calico/crds/networksets.yaml | 12 ++ .../templates/cluster-role-binding.yaml | 12 ++ .../charts/calico/templates/cluster-role.yaml | 109 ++++++++++ .../charts/calico/templates/config.yaml | 45 ++++ .../charts/calico/templates/daemonset.yaml | 191 ++++++++++++++++ .../templates/ippools-default-ipv4.yaml | 10 + .../calico/templates/service-account.yaml | 5 + 
bootkube/resources/charts/calico/values.yaml | 11 + bootkube/resources/charts/flannel.yaml | 4 + bootkube/resources/charts/flannel/.helmignore | 22 ++ bootkube/resources/charts/flannel/Chart.yaml | 18 ++ .../templates/cluster-role-binding.yaml | 12 ++ .../flannel/templates/cluster-role.yaml | 24 +++ .../charts/flannel/templates/config.yaml | 37 ++++ .../charts/flannel/templates/daemonset.yaml | 85 ++++++++ .../flannel/templates/service-account.yaml | 5 + bootkube/resources/charts/flannel/values.yaml | 4 + bootkube/resources/charts/kube-router.yaml | 4 + .../resources/charts/kube-router/.helmignore | 22 ++ .../resources/charts/kube-router/Chart.yaml | 18 ++ .../templates/cluster-role-binding.yaml | 12 ++ .../kube-router/templates/cluster-role.yaml | 33 +++ .../charts/kube-router/templates/config.yaml | 30 +++ .../kube-router/templates/daemonset.yaml | 90 ++++++++ .../templates/service-account.yaml | 5 + .../resources/charts/kube-router/values.yaml | 4 + bootkube/resources/charts/kubelet.yaml | 3 + bootkube/resources/charts/kubelet/Chart.yaml | 18 ++ .../charts/kubelet/templates/kubelet-ds.yaml | 137 ++++++++++++ bootkube/resources/charts/kubelet/values.yaml | 3 + bootkube/resources/charts/kubernetes.yaml | 43 ++++ .../resources/charts/kubernetes/.helmignore | 22 ++ .../resources/charts/kubernetes/Chart.yaml | 18 ++ .../coredns-cluster-role-binding.yaml | 16 ++ .../templates/coredns-cluster-role.yaml | 21 ++ .../kubernetes/templates/coredns-config.yaml | 27 +++ .../templates/coredns-deployment.yaml | 101 +++++++++ .../templates/coredns-service-account.yaml | 5 + .../kubernetes/templates/coredns-service.yaml | 22 ++ .../kube-apiserver-role-binding.yaml | 12 ++ .../templates/kube-apiserver-sa.yaml | 5 + .../templates/kube-apiserver-secret.yaml | 17 ++ .../kubernetes/templates/kube-apiserver.yaml | 93 ++++++++ .../kube-controller-manager-disruption.yaml | 11 + .../kube-controller-manager-role-binding.yaml | 12 ++ .../templates/kube-controller-manager-sa.yaml | 5 + 
.../kube-controller-manager-secret.yaml | 11 + .../templates/kube-controller-manager.yaml | 96 +++++++++ .../templates/kube-proxy-role-binding.yaml | 12 ++ .../kubernetes/templates/kube-proxy-sa.yaml | 5 + .../kubernetes/templates/kube-proxy.yaml | 76 +++++++ .../templates/kube-scheduler-disruption.yaml | 11 + .../kube-scheduler-role-binding.yaml | 12 ++ .../templates/kube-scheduler-sa.yaml | 5 + ...heduler-volume-scheduler-role-binding.yaml | 13 ++ .../kubernetes/templates/kube-scheduler.yaml | 63 ++++++ .../templates/kubeconfig-in-cluster.yaml | 24 +++ .../kubelet-delete-cluster-role-binding.yaml | 12 ++ .../kubelet-delete-cluster-role.yaml | 10 + .../kubelet-nodes-cluster-role-binding.yaml | 12 ++ ...let-pod-checkpointer-psp-role-binding.yaml | 13 ++ ...pod-checkpointer-cluster-role-binding.yaml | 12 ++ .../pod-checkpointer-cluster-role.yaml | 11 + .../templates/pod-checkpointer-psp.yaml | 33 +++ .../pod-checkpointer-role-binding.yaml | 27 +++ .../templates/pod-checkpointer-role.yaml | 24 +++ .../templates/pod-checkpointer-sa.yaml | 5 + .../templates/pod-checkpointer.yaml | 72 +++++++ .../kubernetes/templates/psp-privileged.yaml | 65 ++++++ .../kubernetes/templates/psp-restricted.yaml | 72 +++++++ .../resources/charts/kubernetes/values.yaml | 43 ++++ bootkube/resources/kubeconfig-admin | 18 ++ bootkube/resources/kubeconfig-kubelet | 16 ++ bootkube/terraform.tfvars.example | 5 + bootkube/tls-aggregation.tf | 105 +++++++++ bootkube/tls-etcd.tf | 203 ++++++++++++++++++ bootkube/tls-k8s.tf | 194 +++++++++++++++++ bootkube/variables.tf | 141 ++++++++++++ bootkube/versions.tf | 10 + docs/advanced/customization.md | 2 +- .../flatcar-linux/kubernetes/bootkube.tf | 2 +- .../flatcar-linux/kubernetes/bootkube.tf | 2 +- packet/flatcar-linux/kubernetes/bootkube.tf | 2 +- scripts/update-terraform-render-bootkube.sh | 19 -- 113 files changed, 3569 insertions(+), 30 deletions(-) create mode 100644 bootkube/.gitignore create mode 100644 bootkube/LICENSE create mode 100644 
bootkube/README.md create mode 100644 bootkube/assets.tf create mode 100644 bootkube/conditional.tf create mode 100644 bootkube/outputs.tf create mode 100644 bootkube/resources/bootstrap-manifests/bootstrap-apiserver.yaml create mode 100644 bootkube/resources/bootstrap-manifests/bootstrap-controller-manager.yaml create mode 100644 bootkube/resources/bootstrap-manifests/bootstrap-scheduler.yaml create mode 100644 bootkube/resources/charts/calico.yaml create mode 100644 bootkube/resources/charts/calico/.helmignore create mode 100644 bootkube/resources/charts/calico/Chart.yaml create mode 100644 bootkube/resources/charts/calico/crds/bgpconfigurations.yaml create mode 100644 bootkube/resources/charts/calico/crds/bgppeers.yaml create mode 100644 bootkube/resources/charts/calico/crds/blockaffinities.yaml create mode 100644 bootkube/resources/charts/calico/crds/clusterinformations.yaml create mode 100644 bootkube/resources/charts/calico/crds/felixconfigurations.yaml create mode 100644 bootkube/resources/charts/calico/crds/globalnetworkpolicies.yaml create mode 100644 bootkube/resources/charts/calico/crds/globalnetworksets.yaml create mode 100644 bootkube/resources/charts/calico/crds/hostendpoints.yaml create mode 100644 bootkube/resources/charts/calico/crds/ipamblocks.yaml create mode 100644 bootkube/resources/charts/calico/crds/ipamconfigs.yaml create mode 100644 bootkube/resources/charts/calico/crds/ipamhandles.yaml create mode 100644 bootkube/resources/charts/calico/crds/ippools.yaml create mode 100644 bootkube/resources/charts/calico/crds/networkpolicies.yaml create mode 100644 bootkube/resources/charts/calico/crds/networksets.yaml create mode 100644 bootkube/resources/charts/calico/templates/cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/calico/templates/cluster-role.yaml create mode 100644 bootkube/resources/charts/calico/templates/config.yaml create mode 100644 bootkube/resources/charts/calico/templates/daemonset.yaml create mode 100644 
bootkube/resources/charts/calico/templates/ippools-default-ipv4.yaml create mode 100644 bootkube/resources/charts/calico/templates/service-account.yaml create mode 100644 bootkube/resources/charts/calico/values.yaml create mode 100644 bootkube/resources/charts/flannel.yaml create mode 100644 bootkube/resources/charts/flannel/.helmignore create mode 100644 bootkube/resources/charts/flannel/Chart.yaml create mode 100644 bootkube/resources/charts/flannel/templates/cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/flannel/templates/cluster-role.yaml create mode 100644 bootkube/resources/charts/flannel/templates/config.yaml create mode 100644 bootkube/resources/charts/flannel/templates/daemonset.yaml create mode 100644 bootkube/resources/charts/flannel/templates/service-account.yaml create mode 100644 bootkube/resources/charts/flannel/values.yaml create mode 100644 bootkube/resources/charts/kube-router.yaml create mode 100644 bootkube/resources/charts/kube-router/.helmignore create mode 100644 bootkube/resources/charts/kube-router/Chart.yaml create mode 100644 bootkube/resources/charts/kube-router/templates/cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/kube-router/templates/cluster-role.yaml create mode 100644 bootkube/resources/charts/kube-router/templates/config.yaml create mode 100644 bootkube/resources/charts/kube-router/templates/daemonset.yaml create mode 100644 bootkube/resources/charts/kube-router/templates/service-account.yaml create mode 100644 bootkube/resources/charts/kube-router/values.yaml create mode 100644 bootkube/resources/charts/kubelet.yaml create mode 100644 bootkube/resources/charts/kubelet/Chart.yaml create mode 100644 bootkube/resources/charts/kubelet/templates/kubelet-ds.yaml create mode 100644 bootkube/resources/charts/kubelet/values.yaml create mode 100644 bootkube/resources/charts/kubernetes.yaml create mode 100644 bootkube/resources/charts/kubernetes/.helmignore create mode 100644 
bootkube/resources/charts/kubernetes/Chart.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-cluster-role.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-config.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-deployment.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-service-account.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/coredns-service.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-apiserver-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-apiserver-sa.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-apiserver-secret.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-apiserver.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-controller-manager-disruption.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-controller-manager-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-controller-manager-sa.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-controller-manager-secret.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-controller-manager.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-proxy-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-proxy-sa.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-proxy.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-scheduler-disruption.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-scheduler-role-binding.yaml create mode 100644 
bootkube/resources/charts/kubernetes/templates/kube-scheduler-sa.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-scheduler-volume-scheduler-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kube-scheduler.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kubeconfig-in-cluster.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kubelet-nodes-cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/kubelet-pod-checkpointer-psp-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-psp.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role-binding.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer-sa.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/pod-checkpointer.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/psp-privileged.yaml create mode 100644 bootkube/resources/charts/kubernetes/templates/psp-restricted.yaml create mode 100644 bootkube/resources/charts/kubernetes/values.yaml create mode 100644 bootkube/resources/kubeconfig-admin create mode 100644 bootkube/resources/kubeconfig-kubelet create mode 100644 bootkube/terraform.tfvars.example create mode 100644 bootkube/tls-aggregation.tf create mode 100644 bootkube/tls-etcd.tf create mode 100644 bootkube/tls-k8s.tf 
create mode 100644 bootkube/variables.tf create mode 100644 bootkube/versions.tf delete mode 100755 scripts/update-terraform-render-bootkube.sh diff --git a/Makefile b/Makefile index 2d07bd65..3faf447e 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,3 @@ run-e2e-tests: kube-hunter kube-hunter: KUBECONFIG=${kubeconfig} ${kubehunter} - -.PHONY: update-terraform-render-bootkube -update-terraform-render-bootkube: - ./scripts/update-terraform-render-bootkube.sh $(VERSION) diff --git a/aws/flatcar-linux/kubernetes/bootkube.tf b/aws/flatcar-linux/kubernetes/bootkube.tf index 76e4ea0b..e5b6bbca 100644 --- a/aws/flatcar-linux/kubernetes/bootkube.tf +++ b/aws/flatcar-linux/kubernetes/bootkube.tf @@ -1,6 +1,6 @@ # Self-hosted Kubernetes assets (kubeconfig, manifests) module "bootkube" { - source = "github.com/kinvolk/terraform-render-bootkube?ref=1b49787f016ce299833d2544b6b983d3f7ea4155" + source = "../../../bootkube" cluster_name = var.cluster_name api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] diff --git a/azure/flatcar-linux/kubernetes/bootkube.tf b/azure/flatcar-linux/kubernetes/bootkube.tf index 3e9bb650..040cfa03 100644 --- a/azure/flatcar-linux/kubernetes/bootkube.tf +++ b/azure/flatcar-linux/kubernetes/bootkube.tf @@ -1,6 +1,6 @@ # Self-hosted Kubernetes assets (kubeconfig, manifests) module "bootkube" { - source = "github.com/kinvolk/terraform-render-bootkube?ref=1b49787f016ce299833d2544b6b983d3f7ea4155" + source = "../../../bootkube" cluster_name = var.cluster_name api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)] diff --git a/bare-metal/flatcar-linux/kubernetes/bootkube.tf b/bare-metal/flatcar-linux/kubernetes/bootkube.tf index 881c7d75..7e400b7c 100644 --- a/bare-metal/flatcar-linux/kubernetes/bootkube.tf +++ b/bare-metal/flatcar-linux/kubernetes/bootkube.tf @@ -1,6 +1,6 @@ # Self-hosted Kubernetes assets (kubeconfig, manifests) module "bootkube" { - source = 
"github.com/kinvolk/terraform-render-bootkube?ref=1b49787f016ce299833d2544b6b983d3f7ea4155" + source = "../../../bootkube" cluster_name = var.cluster_name api_servers = [var.k8s_domain_name] diff --git a/bootkube/.gitignore b/bootkube/.gitignore new file mode 100644 index 00000000..2061135e --- /dev/null +++ b/bootkube/.gitignore @@ -0,0 +1,4 @@ +*.tfvars +.terraform +*.tfstate* +assets diff --git a/bootkube/LICENSE b/bootkube/LICENSE new file mode 100644 index 00000000..0c552266 --- /dev/null +++ b/bootkube/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Dalton Hubble + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/bootkube/README.md b/bootkube/README.md new file mode 100644 index 00000000..169c9f6c --- /dev/null +++ b/bootkube/README.md @@ -0,0 +1,49 @@ +# bootkube + +`bootkube` is a Terraform module that renders [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) assets for bootstrapping a Kubernetes cluster. 
+ +## Audience + +`bootkube` is a low-level component of the [Lokomotive](https://github.com/kinvolk/lokomotive-kubernetes) Kubernetes distribution. Use Lokomotive modules to create and manage Kubernetes clusters across supported platforms. Use the bootkube module if you'd like to customize a Kubernetes control plane or build your own distribution. + +## Usage + +Use the module to declare bootkube assets. Check [variables.tf](variables.tf) for options and [terraform.tfvars.example](terraform.tfvars.example) for examples. + +```hcl +module "bootkube" { + source = "git::https://github.com/kinvolk/lokomotive-kubernetes//bootkube?ref=SHA" + + cluster_name = "example" + api_servers = ["node1.example.com"] + etcd_servers = ["node1.example.com"] + asset_dir = "/home/core/clusters/mycluster" +} +``` + +Generate the assets. + +```sh +terraform init +terraform plan +terraform apply +``` + +Find bootkube assets rendered to the `asset_dir` path. That's it. + +### Comparison + +Render bootkube assets directly with bootkube v0.14.0. + +```sh +bootkube render --asset-dir=assets --api-servers=https://node1.example.com:6443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379 +``` + +Compare assets. Rendered assets may differ slightly from bootkube assets to reflect decisions made by the [Lokomotive](https://github.com/kinvolk/lokomotive-kubernetes) distribution. 
+ +```sh +pushd /home/core/mycluster +mv manifests-networking/* manifests +popd +diff -rw assets /home/core/mycluster +``` diff --git a/bootkube/assets.tf b/bootkube/assets.tf new file mode 100644 index 00000000..d7702313 --- /dev/null +++ b/bootkube/assets.tf @@ -0,0 +1,136 @@ +# Self-hosted Kubernetes bootstrap-manifests +resource "template_dir" "bootstrap-manifests" { + source_dir = "${replace(path.module, path.cwd, ".")}/resources/bootstrap-manifests" + destination_dir = "${var.asset_dir}/bootstrap-manifests" + + vars = { + hyperkube_image = var.container_images["hyperkube"] + etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers)) + cloud_provider = var.cloud_provider + pod_cidr = var.pod_cidr + service_cidr = var.service_cidr + trusted_certs_dir = var.trusted_certs_dir + } +} + +# Populate kubernetes chart values file named kubernetes.yaml. +resource "local_file" "kubernetes" { + content = data.template_file.kubernetes.rendered + filename = "${var.asset_dir}/charts/kube-system/kubernetes.yaml" +} + +# Populate kubernetes control plane chart. +# TODO: Currently, there is no way in Terraform to copy local directory, so we use `template_dir` for it. +# The downside is, that any Terraform templating syntax stored in this directory will be evaluated, which may bring unexpected results. +resource "template_dir" "kubernetes" { + source_dir = "${replace(path.module, path.cwd, ".")}/resources/charts/kubernetes" + destination_dir = "${var.asset_dir}/charts/kube-system/kubernetes" +} + +# Render kubernetes.yaml for kubernetes chart. 
+data "template_file" "kubernetes" { + template = "${file("${path.module}/resources/charts/kubernetes.yaml")}" + + vars = { + hyperkube_image = var.container_images["hyperkube"] + pod_checkpointer_image = var.container_images["pod_checkpointer"] + coredns_image = "${var.container_images["coredns"]}${var.container_arch}" + etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers)) + control_plane_replicas = max(2, length(var.etcd_servers)) + cloud_provider = var.cloud_provider + pod_cidr = var.pod_cidr + service_cidr = var.service_cidr + cluster_domain_suffix = var.cluster_domain_suffix + cluster_dns_service_ip = cidrhost(var.service_cidr, 10) + trusted_certs_dir = var.trusted_certs_dir + ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) + ca_key = base64encode(tls_private_key.kube-ca.private_key_pem) + server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) + apiserver_key = base64encode(tls_private_key.apiserver.private_key_pem) + apiserver_cert = base64encode(tls_locally_signed_cert.apiserver.cert_pem) + serviceaccount_pub = base64encode(tls_private_key.service-account.public_key_pem) + serviceaccount_key = base64encode(tls_private_key.service-account.private_key_pem) + etcd_ca_cert = base64encode(tls_self_signed_cert.etcd-ca.cert_pem) + etcd_client_cert = base64encode(tls_locally_signed_cert.client.cert_pem) + etcd_client_key = base64encode(tls_private_key.client.private_key_pem) + enable_aggregation = var.enable_aggregation + aggregation_ca_cert = var.enable_aggregation == true ? base64encode(join(" ", tls_self_signed_cert.aggregation-ca.*.cert_pem)) : "" + aggregation_client_cert = var.enable_aggregation == true ? base64encode(join(" ", tls_locally_signed_cert.aggregation-client.*.cert_pem)) : "" + aggregation_client_key = var.enable_aggregation == true ? 
base64encode(join(" ", tls_private_key.aggregation-client.*.private_key_pem)) : "" + } +} + +# Render kubelet.yaml for kubelet chart +data "template_file" "kubelet" { + template = "${file("${path.module}/resources/charts/kubelet.yaml")}" + + vars = { + hyperkube_image = var.container_images["hyperkube"] + cluster_dns_service_ip = cidrhost(var.service_cidr, 10) + cluster_domain_suffix = var.cluster_domain_suffix + } +} + +# Populate kubelet chart values file named kubelet.yaml. +resource "local_file" "kubelet" { + content = data.template_file.kubelet.rendered + filename = "${var.asset_dir}/charts/kube-system/kubelet.yaml" +} + +# Populate kubelet chart. +# TODO: Currently, there is no way in Terraform to copy local directory, so we use `template_dir` for it. +# The downside is, that any Terraform templating syntax stored in this directory will be evaluated, which may bring unexpected results. +resource "template_dir" "kubelet" { + source_dir = "${replace(path.module, path.cwd, ".")}/resources/charts/kubelet" + destination_dir = "${var.asset_dir}/charts/kube-system/kubelet" +} + +# Generated kubeconfig for Kubelets +resource "local_file" "kubeconfig-kubelet" { + content = data.template_file.kubeconfig-kubelet.rendered + filename = "${var.asset_dir}/auth/kubeconfig-kubelet" +} + +# Generated admin kubeconfig (bootkube requires it be at auth/kubeconfig) +# https://github.com/kubernetes-incubator/bootkube/blob/master/pkg/bootkube/bootkube.go#L42 +resource "local_file" "kubeconfig-admin" { + content = data.template_file.kubeconfig-admin.rendered + filename = "${var.asset_dir}/auth/kubeconfig" +} + +# Generated admin kubeconfig in a file named after the cluster +resource "local_file" "kubeconfig-admin-named" { + content = data.template_file.kubeconfig-admin.rendered + filename = "${var.asset_dir}/auth/${var.cluster_name}-config" +} + +data "template_file" "kubeconfig-kubelet" { + template = file("${path.module}/resources/kubeconfig-kubelet") + + vars = { + ca_cert = 
base64encode(tls_self_signed_cert.kube-ca.cert_pem) + kubelet_cert = base64encode(tls_locally_signed_cert.kubelet.cert_pem) + kubelet_key = base64encode(tls_private_key.kubelet.private_key_pem) + server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) + } +} + +# If var.api_servers_external isn't set, use var.api_servers. +# This is for supporting separate API server URLs for external clients in a backward-compatible way. +# The use of split() and join() here is because Terraform's conditional operator ('?') cannot be +# used with lists. +locals { + api_servers_external = split(",", join(",", var.api_servers_external) == "" ? join(",", var.api_servers) : join(",", var.api_servers_external)) +} + +data "template_file" "kubeconfig-admin" { + template = file("${path.module}/resources/kubeconfig-admin") + + vars = { + name = var.cluster_name + ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem) + kubelet_cert = base64encode(tls_locally_signed_cert.admin.cert_pem) + kubelet_key = base64encode(tls_private_key.admin.private_key_pem) + server = format("https://%s:%s", local.api_servers_external[0], var.external_apiserver_port) + } +} diff --git a/bootkube/conditional.tf b/bootkube/conditional.tf new file mode 100644 index 00000000..27488010 --- /dev/null +++ b/bootkube/conditional.tf @@ -0,0 +1,111 @@ +# Assets generated only when certain options are chosen + +# Populate flannel chart values file named flannel.yaml. +resource "local_file" "flannel" { + count = var.networking == "flannel" ? 1 : 0 + content = templatefile("${path.module}/resources/charts/flannel.yaml",{ + flannel_image = "${var.container_images["flannel"]}${var.container_arch}" + flannel_cni_image = var.container_images["flannel_cni"] + pod_cidr = var.pod_cidr + }) + filename = "${var.asset_dir}/charts/kube-system/flannel.yaml" +} + +# Populate flannel chart. +# TODO: Currently, there is no way in Terraform to copy local directory, so we use `template_dir` for it. 
+# The downside is, that any Terraform templating syntax stored in this directory will be evaluated, which may bring unexpected results. +resource "template_dir" "flannel" { + count = var.networking == "flannel" ? 1 : 0 + source_dir = "${replace(path.module, path.cwd, ".")}/resources/charts/flannel" + destination_dir = "${var.asset_dir}/charts/kube-system/flannel" +} + +# Render flannel.yaml for flannel chart. +data "template_file" "flannel" { + count = var.networking == "flannel" ? 1 : 0 + template = "${file("${path.module}/resources/charts/flannel.yaml")}" + + vars = { + flannel_image = "${var.container_images["flannel"]}${var.container_arch}" + flannel_cni_image = var.container_images["flannel_cni"] + pod_cidr = var.pod_cidr + } +} + +# Populate calico chart values file named calico.yaml. +resource "local_file" "calico" { + count = var.networking == "calico" ? 1 : 0 + content = templatefile("${path.module}/resources/charts/calico.yaml",{ + calico_image = var.container_images["calico"] + calico_cni_image = var.container_images["calico_cni"] + network_mtu = var.network_mtu + network_encapsulation = indent(2, var.network_encapsulation == "vxlan" ? "vxlanMode: Always" : "ipipMode: Always") + ipip_enabled = var.network_encapsulation == "ipip" ? true : false + ipip_readiness = var.network_encapsulation == "ipip" ? indent(16, "- --bird-ready") : "" + vxlan_enabled = var.network_encapsulation == "vxlan" ? true : false + network_ip_autodetection_method = var.network_ip_autodetection_method + pod_cidr = var.pod_cidr + enable_reporting = var.enable_reporting + }) + filename = "${var.asset_dir}/charts/kube-system/calico.yaml" +} + +# Populate calico chart. +# TODO: Currently, there is no way in Terraform to copy local directory, so we use `template_dir` for it. +# The downside is, that any Terraform templating syntax stored in this directory will be evaluated, which may bring unexpected results. +resource "template_dir" "calico" { + count = var.networking == "calico" ? 
1 : 0 + source_dir = "${replace(path.module, path.cwd, ".")}/resources/charts/calico" + destination_dir = "${var.asset_dir}/charts/kube-system/calico" +} + +# Render calico.yaml for calico chart. +data "template_file" "calico" { + count = var.networking == "calico" ? 1 : 0 + template = "${file("${path.module}/resources/charts/calico.yaml")}" + + vars = { + calico_image = var.container_images["calico"] + calico_cni_image = var.container_images["calico_cni"] + network_mtu = var.network_mtu + network_encapsulation = indent(2, var.network_encapsulation == "vxlan" ? "vxlanMode: Always" : "ipipMode: Always") + ipip_enabled = var.network_encapsulation == "ipip" ? true : false + ipip_readiness = var.network_encapsulation == "ipip" ? indent(16, "- --bird-ready") : "" + vxlan_enabled = var.network_encapsulation == "vxlan" ? true : false + network_ip_autodetection_method = var.network_ip_autodetection_method + pod_cidr = var.pod_cidr + enable_reporting = var.enable_reporting + } +} + +# Populate kube-router chart values file named kube-router.yaml. +resource "local_file" "kube-router" { + count = var.networking == "kube-router" ? 1 : 0 + content = templatefile("${path.module}/resources/charts/kube-router.yaml",{ + kube_router_image = var.container_images["kube_router"] + flannel_cni_image = var.container_images["flannel_cni"] + network_mtu = var.network_mtu + }) + filename = "${var.asset_dir}/charts/kube-system/kube-router.yaml" +} + +# Populate kube-router chart. +# TODO: Currently, there is no way in Terraform to copy local directory, so we use `template_dir` for it. +# The downside is, that any Terraform templating syntax stored in this directory will be evaluated, which may bring unexpected results. +resource "template_dir" "kube-router" { + count = var.networking == "kube-router" ? 
1 : 0 + source_dir = "${replace(path.module, path.cwd, ".")}/resources/charts/kube-router" + destination_dir = "${var.asset_dir}/charts/kube-system/kube-router" +} + +# Render kube-router.yaml for kube-router chart. +data "template_file" "kube-router" { + count = var.networking == "kube-router" ? 1 : 0 + template = "${file("${path.module}/resources/charts/kube-router.yaml")}" + + vars = { + kube_router_image = var.container_images["kube_router"] + flannel_cni_image = var.container_images["flannel_cni"] + network_mtu = var.network_mtu + } +} diff --git a/bootkube/outputs.tf b/bootkube/outputs.tf new file mode 100644 index 00000000..1ac054ee --- /dev/null +++ b/bootkube/outputs.tf @@ -0,0 +1,67 @@ +output "cluster_dns_service_ip" { + value = cidrhost(var.service_cidr, 10) +} + +// Generated kubeconfig for Kubelets (i.e. lower privilege than admin) +output "kubeconfig-kubelet" { + value = data.template_file.kubeconfig-kubelet.rendered +} + +// Generated kubeconfig for admins (i.e. human super-user) +output "kubeconfig-admin" { + value = data.template_file.kubeconfig-admin.rendered +} + +# etcd TLS assets + +output "etcd_ca_cert" { + value = tls_self_signed_cert.etcd-ca.cert_pem +} + +output "etcd_client_cert" { + value = tls_locally_signed_cert.client.cert_pem +} + +output "etcd_client_key" { + value = tls_private_key.client.private_key_pem +} + +output "etcd_server_cert" { + value = tls_locally_signed_cert.server.cert_pem +} + +output "etcd_server_key" { + value = tls_private_key.server.private_key_pem +} + +output "etcd_peer_cert" { + value = tls_locally_signed_cert.peer.cert_pem +} + +output "etcd_peer_key" { + value = tls_private_key.peer.private_key_pem +} + +# Some platforms may need to reconstruct the kubeconfig directly in user-data. +# That can't be done with the way template_file interpolates multi-line +# contents so the raw components of the kubeconfig may be needed. 
+ +output "ca_cert" { + value = base64encode(tls_self_signed_cert.kube-ca.cert_pem) +} + +output "kubelet_cert" { + value = base64encode(tls_locally_signed_cert.kubelet.cert_pem) +} + +output "kubelet_key" { + value = base64encode(tls_private_key.kubelet.private_key_pem) +} + +output "server" { + value = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port) +} + +output "server_admin" { + value = format("https://%s:%s", element(local.api_servers_external, 0), var.external_apiserver_port) +} diff --git a/bootkube/resources/bootstrap-manifests/bootstrap-apiserver.yaml b/bootkube/resources/bootstrap-manifests/bootstrap-apiserver.yaml new file mode 100644 index 00000000..3b4ca0fa --- /dev/null +++ b/bootkube/resources/bootstrap-manifests/bootstrap-apiserver.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bootstrap-kube-apiserver + namespace: kube-system + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: ${hyperkube_image} + command: + - /hyperkube + - kube-apiserver + - --advertise-address=$(POD_IP) + - --allow-privileged=true + - --anonymous-auth=false + - --authorization-mode=RBAC + - --bind-address=0.0.0.0 + - --client-ca-file=/etc/kubernetes/secrets/ca.crt + - --cloud-provider=${cloud_provider} + - --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodSecurityPolicy + - --etcd-cafile=/etc/kubernetes/secrets/etcd-client-ca.crt + - --etcd-certfile=/etc/kubernetes/secrets/etcd-client.crt + - --etcd-keyfile=/etc/kubernetes/secrets/etcd-client.key + - --etcd-servers=${etcd_servers} + - --insecure-port=0 + - --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt + - --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key + - 
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --secure-port=6443 + - --service-account-key-file=/etc/kubernetes/secrets/service-account.pub + - --service-cluster-ip-range=${service_cidr} + - --storage-backend=etcd3 + - --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt + - --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + volumeMounts: + - name: secrets + mountPath: /etc/kubernetes/secrets + readOnly: true + - name: ssl-certs-host + mountPath: /etc/ssl/certs + readOnly: true + volumes: + - name: secrets + hostPath: + path: /etc/kubernetes/bootstrap-secrets + - name: ssl-certs-host + hostPath: + path: ${trusted_certs_dir} diff --git a/bootkube/resources/bootstrap-manifests/bootstrap-controller-manager.yaml b/bootkube/resources/bootstrap-manifests/bootstrap-controller-manager.yaml new file mode 100644 index 00000000..367a25ac --- /dev/null +++ b/bootkube/resources/bootstrap-manifests/bootstrap-controller-manager.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bootstrap-kube-controller-manager + namespace: kube-system + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' +spec: + containers: + - name: kube-controller-manager + image: ${hyperkube_image} + command: + - ./hyperkube + - kube-controller-manager + - --allocate-node-cidrs=true + - --cluster-cidr=${pod_cidr} + - --service-cluster-ip-range=${service_cidr} + - --cloud-provider=${cloud_provider} + - --cluster-signing-cert-file=/etc/kubernetes/secrets/ca.crt + - --cluster-signing-key-file=/etc/kubernetes/secrets/ca.key + - --configure-cloud-routes=false + - --kubeconfig=/etc/kubernetes/secrets/kubeconfig + - --leader-elect=true + - --root-ca-file=/etc/kubernetes/secrets/ca.crt + - --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key + volumeMounts: + - name: secrets + mountPath: /etc/kubernetes/secrets + readOnly: true + - name: 
ssl-host + mountPath: /etc/ssl/certs + readOnly: true + hostNetwork: true + volumes: + - name: secrets + hostPath: + path: /etc/kubernetes/bootstrap-secrets + - name: ssl-host + hostPath: + path: ${trusted_certs_dir} diff --git a/bootkube/resources/bootstrap-manifests/bootstrap-scheduler.yaml b/bootkube/resources/bootstrap-manifests/bootstrap-scheduler.yaml new file mode 100644 index 00000000..ed3052f3 --- /dev/null +++ b/bootkube/resources/bootstrap-manifests/bootstrap-scheduler.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Pod +metadata: + name: bootstrap-kube-scheduler + namespace: kube-system + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' +spec: + containers: + - name: kube-scheduler + image: ${hyperkube_image} + command: + - ./hyperkube + - kube-scheduler + - --kubeconfig=/etc/kubernetes/secrets/kubeconfig + - --leader-elect=true + volumeMounts: + - name: secrets + mountPath: /etc/kubernetes/secrets + readOnly: true + hostNetwork: true + volumes: + - name: secrets + hostPath: + path: /etc/kubernetes/bootstrap-secrets diff --git a/bootkube/resources/charts/calico.yaml b/bootkube/resources/charts/calico.yaml new file mode 100644 index 00000000..5c2a7b92 --- /dev/null +++ b/bootkube/resources/charts/calico.yaml @@ -0,0 +1,11 @@ +calico: + networkMTU: ${network_mtu} + image: ${calico_image} + cniImage: ${calico_cni_image} + enableReporting: ${enable_reporting} + networkIpAutodetectionMethod: ${network_ip_autodetection_method} + ipipEnabled: ${ipip_enabled} + vxlanEnabled: ${vxlan_enabled} + ipipReadiness: "${ipip_readiness}" + podCIDR: ${pod_cidr} + networkEncapsulation: "${network_encapsulation}" diff --git a/bootkube/resources/charts/calico/.helmignore b/bootkube/resources/charts/calico/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/bootkube/resources/charts/calico/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/bootkube/resources/charts/calico/Chart.yaml b/bootkube/resources/charts/calico/Chart.yaml new file mode 100644 index 00000000..69a83722 --- /dev/null +++ b/bootkube/resources/charts/calico/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: calico +description: A Helm chart for installing Calico CNI + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+version: 0.1.0 + diff --git a/bootkube/resources/charts/calico/crds/bgpconfigurations.yaml b/bootkube/resources/charts/calico/crds/bgpconfigurations.yaml new file mode 100644 index 00000000..297768c4 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/bgpconfigurations.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration diff --git a/bootkube/resources/charts/calico/crds/bgppeers.yaml b/bootkube/resources/charts/calico/crds/bgppeers.yaml new file mode 100644 index 00000000..7e4ded15 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/bgppeers.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer diff --git a/bootkube/resources/charts/calico/crds/blockaffinities.yaml b/bootkube/resources/charts/calico/crds/blockaffinities.yaml new file mode 100644 index 00000000..27fcb054 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/blockaffinities.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity diff --git a/bootkube/resources/charts/calico/crds/clusterinformations.yaml b/bootkube/resources/charts/calico/crds/clusterinformations.yaml new file mode 100644 index 00000000..d8557c83 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/clusterinformations.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: 
CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation diff --git a/bootkube/resources/charts/calico/crds/felixconfigurations.yaml b/bootkube/resources/charts/calico/crds/felixconfigurations.yaml new file mode 100644 index 00000000..80e96215 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/felixconfigurations.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration diff --git a/bootkube/resources/charts/calico/crds/globalnetworkpolicies.yaml b/bootkube/resources/charts/calico/crds/globalnetworkpolicies.yaml new file mode 100644 index 00000000..8b3d8692 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/globalnetworkpolicies.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy diff --git a/bootkube/resources/charts/calico/crds/globalnetworksets.yaml b/bootkube/resources/charts/calico/crds/globalnetworksets.yaml new file mode 100644 index 00000000..5fc643c8 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/globalnetworksets.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: 
globalnetworkset diff --git a/bootkube/resources/charts/calico/crds/hostendpoints.yaml b/bootkube/resources/charts/calico/crds/hostendpoints.yaml new file mode 100644 index 00000000..a14edcdd --- /dev/null +++ b/bootkube/resources/charts/calico/crds/hostendpoints.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint diff --git a/bootkube/resources/charts/calico/crds/ipamblocks.yaml b/bootkube/resources/charts/calico/crds/ipamblocks.yaml new file mode 100644 index 00000000..d2879ac0 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/ipamblocks.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock diff --git a/bootkube/resources/charts/calico/crds/ipamconfigs.yaml b/bootkube/resources/charts/calico/crds/ipamconfigs.yaml new file mode 100644 index 00000000..7277b47f --- /dev/null +++ b/bootkube/resources/charts/calico/crds/ipamconfigs.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig diff --git a/bootkube/resources/charts/calico/crds/ipamhandles.yaml b/bootkube/resources/charts/calico/crds/ipamhandles.yaml new file mode 100644 index 00000000..9d53a86d --- /dev/null +++ b/bootkube/resources/charts/calico/crds/ipamhandles.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: 
ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle diff --git a/bootkube/resources/charts/calico/crds/ippools.yaml b/bootkube/resources/charts/calico/crds/ippools.yaml new file mode 100644 index 00000000..86fe3e94 --- /dev/null +++ b/bootkube/resources/charts/calico/crds/ippools.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool diff --git a/bootkube/resources/charts/calico/crds/networkpolicies.yaml b/bootkube/resources/charts/calico/crds/networkpolicies.yaml new file mode 100644 index 00000000..47f542fa --- /dev/null +++ b/bootkube/resources/charts/calico/crds/networkpolicies.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy diff --git a/bootkube/resources/charts/calico/crds/networksets.yaml b/bootkube/resources/charts/calico/crds/networksets.yaml new file mode 100644 index 00000000..d6e6772c --- /dev/null +++ b/bootkube/resources/charts/calico/crds/networksets.yaml @@ -0,0 +1,12 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset diff --git a/bootkube/resources/charts/calico/templates/cluster-role-binding.yaml b/bootkube/resources/charts/calico/templates/cluster-role-binding.yaml new file mode 100644 index 00000000..f7644926 --- /dev/null +++ 
b/bootkube/resources/charts/calico/templates/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system diff --git a/bootkube/resources/charts/calico/templates/cluster-role.yaml b/bootkube/resources/charts/calico/templates/cluster-role.yaml new file mode 100644 index 00000000..4469fae8 --- /dev/null +++ b/bootkube/resources/charts/calico/templates/cluster-role.yaml @@ -0,0 +1,109 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: calico-node +rules: + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + - watch + - list + # Used by Calico for policy information + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Calico patches the node NetworkUnavailable status + - patch + # Calico updates some info in node annotations + - update + # CNI plugin patches pods/status + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico reads some info on nodes + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # Calico monitors Kubernetes NetworkPolicies + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Calico monitors its CRDs + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + - 
apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico may perform IPAM allocations + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Watch block affinities for route aggregation + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch diff --git a/bootkube/resources/charts/calico/templates/config.yaml b/bootkube/resources/charts/calico/templates/config.yaml new file mode 100644 index 00000000..5c7789d3 --- /dev/null +++ b/bootkube/resources/charts/calico/templates/config.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: calico-config + namespace: kube-system +data: + # Disable Typha for now. + typha_service_name: "none" + # Calico backend to use + calico_backend: "bird" + # Calico MTU + veth_mtu: "{{ .Values.calico.networkMTU }}" + # The CNI network configuration to install on each node. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } diff --git a/bootkube/resources/charts/calico/templates/daemonset.yaml b/bootkube/resources/charts/calico/templates/daemonset.yaml new file mode 100644 index 00000000..1c418a2d --- /dev/null +++ b/bootkube/resources/charts/calico/templates/daemonset.yaml @@ -0,0 +1,191 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + priorityClassName: system-node-critical + serviceAccountName: calico-node + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + initContainers: + # Install Calico CNI binaries and CNI network config file on nodes + - name: install-cni + image: {{ .Values.calico.cniImage }} + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create on each node. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # Set node name based on k8s nodeName + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Contents of the CNI config to create on each node. 
+ - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + - name: CNI_NET_DIR + value: "/etc/kubernetes/cni/net.d" + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + - name: SLEEP + value: "false" + volumeMounts: + - name: cni-bin-dir + mountPath: /host/opt/cni/bin + - name: cni-conf-dir + mountPath: /host/etc/cni/net.d + containers: + - name: calico-node + image: {{ .Values.calico.image }} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for datastore + - name: WAIT_FOR_DATASTORE + value: "true" + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name + - name: FELIX_USAGEREPORTINGENABLED + value: "{{ .Values.calico.enableReporting }}" + # Set node name based on k8s nodeName. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Calico network backend + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. 
+ - name: IP + value: "autodetect" + - name: IP_AUTODETECTION_METHOD + value: {{ .Values.calico.networkIpAutodetectionMethod }} + # Whether Felix should enable IP-in-IP tunnel + - name: FELIX_IPINIPENABLED + value: "{{ .Values.calico.ipipEnabled }}" + # MTU to set on the IPIP tunnel (if enabled) + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Whether Felix should enable VXLAN tunnel + - name: FELIX_VXLANENABLED + value: "{{ .Values.calico.vxlanEnabled }}" + # MTU to set on the VXLAN tunnel (if enabled) + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + - name: NO_DEFAULT_POOLS + value: "true" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Enable felix info logging. 
+ - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 150m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + {{ .Values.calico.ipipReadiness }} + periodSeconds: 10 + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: var-lib-calico + mountPath: /var/lib/calico + readOnly: false + - name: var-run-calico + mountPath: /var/run/calico + readOnly: false + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + terminationGracePeriodSeconds: 0 + volumes: + # Used by calico/node + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: xtables-lock + hostPath: + type: FileOrCreate + path: /run/xtables.lock + # Used by install-cni + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-conf-dir + hostPath: + path: /etc/kubernetes/cni/net.d diff --git a/bootkube/resources/charts/calico/templates/ippools-default-ipv4.yaml b/bootkube/resources/charts/calico/templates/ippools-default-ipv4.yaml new file mode 100644 index 00000000..140cd0ad --- /dev/null +++ b/bootkube/resources/charts/calico/templates/ippools-default-ipv4.yaml @@ -0,0 +1,10 @@ +apiVersion: crd.projectcalico.org/v1 +kind: IPPool +metadata: + name: default-ipv4-ippool +spec: + blockSize: 24 + cidr: {{ .Values.calico.podCIDR }} + {{ .Values.calico.networkEncapsulation }} + natOutgoing: true + nodeSelector: all() diff --git a/bootkube/resources/charts/calico/templates/service-account.yaml b/bootkube/resources/charts/calico/templates/service-account.yaml new file mode 100644 index 00000000..f16b4b0e --- /dev/null +++ 
b/bootkube/resources/charts/calico/templates/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/bootkube/resources/charts/calico/values.yaml b/bootkube/resources/charts/calico/values.yaml new file mode 100644 index 00000000..79043e95 --- /dev/null +++ b/bootkube/resources/charts/calico/values.yaml @@ -0,0 +1,11 @@ +calico: + networkMTU: 1500 + image: calico/node:v3.11.2 + cniImage: calico/cni:v3.11.2 + enableReporting: false + networkIpAutodetectionMethod: first-found + ipipEnabled: true + vxlanEnabled: false + ipipReadiness: "- --bird-ready" + podCIDR: 10.2.0.0/16 + networkEncapsulation: "ipipMode: Always" diff --git a/bootkube/resources/charts/flannel.yaml b/bootkube/resources/charts/flannel.yaml new file mode 100644 index 00000000..62e4be72 --- /dev/null +++ b/bootkube/resources/charts/flannel.yaml @@ -0,0 +1,4 @@ +flannel: + image: ${flannel_image} + cniImage: ${flannel_cni_image} + podCIDR: ${pod_cidr} diff --git a/bootkube/resources/charts/flannel/.helmignore b/bootkube/resources/charts/flannel/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/bootkube/resources/charts/flannel/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/bootkube/resources/charts/flannel/Chart.yaml b/bootkube/resources/charts/flannel/Chart.yaml new file mode 100644 index 00000000..be58c559 --- /dev/null +++ b/bootkube/resources/charts/flannel/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: flannel +description: A Helm chart for installing flannel CNI + +# A chart can be either an 'application' or a 'library' chart. 
+# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + diff --git a/bootkube/resources/charts/flannel/templates/cluster-role-binding.yaml b/bootkube/resources/charts/flannel/templates/cluster-role-binding.yaml new file mode 100644 index 00000000..6efef428 --- /dev/null +++ b/bootkube/resources/charts/flannel/templates/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-system diff --git a/bootkube/resources/charts/flannel/templates/cluster-role.yaml b/bootkube/resources/charts/flannel/templates/cluster-role.yaml new file mode 100644 index 00000000..88688865 --- /dev/null +++ b/bootkube/resources/charts/flannel/templates/cluster-role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch diff --git a/bootkube/resources/charts/flannel/templates/config.yaml b/bootkube/resources/charts/flannel/templates/config.yaml new file mode 100644 index 00000000..2c8f01e5 --- /dev/null +++ 
b/bootkube/resources/charts/flannel/templates/config.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: flannel-config + namespace: kube-system + labels: + tier: node + k8s-app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ .Values.flannel.podCIDR }}", + "Backend": { + "Type": "vxlan", + "Port": 4789 + } + } diff --git a/bootkube/resources/charts/flannel/templates/daemonset.yaml b/bootkube/resources/charts/flannel/templates/daemonset.yaml new file mode 100644 index 00000000..171ef556 --- /dev/null +++ b/bootkube/resources/charts/flannel/templates/daemonset.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: flannel + namespace: kube-system + labels: + k8s-app: flannel +spec: + selector: + matchLabels: + k8s-app: flannel + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: flannel + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + priorityClassName: system-node-critical + serviceAccountName: flannel + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: flannel + image: {{ .Values.flannel.image }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + securityContext: + privileged: true + resources: + requests: + cpu: 100m + volumeMounts: + - name: flannel-config + mountPath: /etc/kube-flannel/ + - 
name: run-flannel + mountPath: /run/flannel + - name: install-cni + image: {{ .Values.flannel.cniImage }} + command: ["/install-cni.sh"] + env: + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: flannel-config + key: cni-conf.json + volumeMounts: + - name: cni-bin-dir + mountPath: /host/opt/cni/bin/ + - name: cni-conf-dir + mountPath: /host/etc/cni/net.d + volumes: + - name: flannel-config + configMap: + name: flannel-config + - name: run-flannel + hostPath: + path: /run/flannel + # Used by install-cni + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-conf-dir + hostPath: + path: /etc/kubernetes/cni/net.d diff --git a/bootkube/resources/charts/flannel/templates/service-account.yaml b/bootkube/resources/charts/flannel/templates/service-account.yaml new file mode 100644 index 00000000..7c0411b1 --- /dev/null +++ b/bootkube/resources/charts/flannel/templates/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: kube-system diff --git a/bootkube/resources/charts/flannel/values.yaml b/bootkube/resources/charts/flannel/values.yaml new file mode 100644 index 00000000..eb6173ce --- /dev/null +++ b/bootkube/resources/charts/flannel/values.yaml @@ -0,0 +1,4 @@ +flannel: + image: quay.io/coreos/flannel:v0.11.0-amd64 + cniImage: quay.io/coreos/flannel-cni:v0.3.0 + podCIDR: 10.2.0.0/16 diff --git a/bootkube/resources/charts/kube-router.yaml b/bootkube/resources/charts/kube-router.yaml new file mode 100644 index 00000000..99ed41b1 --- /dev/null +++ b/bootkube/resources/charts/kube-router.yaml @@ -0,0 +1,4 @@ +kubeRouter: + image: ${kube_router_image} + cniImage: ${flannel_cni_image} + networkMTU: ${network_mtu} diff --git a/bootkube/resources/charts/kube-router/.helmignore b/bootkube/resources/charts/kube-router/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/bootkube/resources/charts/kube-router/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when 
building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/bootkube/resources/charts/kube-router/Chart.yaml b/bootkube/resources/charts/kube-router/Chart.yaml new file mode 100644 index 00000000..be58c559 --- /dev/null +++ b/bootkube/resources/charts/kube-router/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: kube-router +description: A Helm chart for installing kube-router CNI + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+version: 0.1.0 + diff --git a/bootkube/resources/charts/kube-router/templates/cluster-role-binding.yaml b/bootkube/resources/charts/kube-router/templates/cluster-role-binding.yaml new file mode 100644 index 00000000..eb1f4566 --- /dev/null +++ b/bootkube/resources/charts/kube-router/templates/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-router +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-router +subjects: + - kind: ServiceAccount + name: kube-router + namespace: kube-system diff --git a/bootkube/resources/charts/kube-router/templates/cluster-role.yaml b/bootkube/resources/charts/kube-router/templates/cluster-role.yaml new file mode 100644 index 00000000..b00bb846 --- /dev/null +++ b/bootkube/resources/charts/kube-router/templates/cluster-role.yaml @@ -0,0 +1,33 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-router +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - nodes + - endpoints + verbs: + - list + - get + - watch + - apiGroups: + - "networking.k8s.io" + resources: + - networkpolicies + verbs: + - list + - get + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch diff --git a/bootkube/resources/charts/kube-router/templates/config.yaml b/bootkube/resources/charts/kube-router/templates/config.yaml new file mode 100644 index 00000000..199ff985 --- /dev/null +++ b/bootkube/resources/charts/kube-router/templates/config.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-router-config + namespace: kube-system +data: + cni-conf.json: | + { + "name": "pod-network", + "cniVersion": "0.3.1", + "plugins":[ + { + "name": "kube-router", + "type": "bridge", + "bridge": "kube-bridge", + "isDefaultGateway": true, + "mtu": {{ .Values.kubeRouter.networkMTU }}, + "ipam": { + "type": "host-local" + } + 
}, + { + "type": "portmap", + "snat": true, + "capabilities": { + "portMappings": true + } + } + ] + } diff --git a/bootkube/resources/charts/kube-router/templates/daemonset.yaml b/bootkube/resources/charts/kube-router/templates/daemonset.yaml new file mode 100644 index 00000000..dc351332 --- /dev/null +++ b/bootkube/resources/charts/kube-router/templates/daemonset.yaml @@ -0,0 +1,90 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-router + namespace: kube-system + labels: + k8s-app: kube-router +spec: + selector: + matchLabels: + k8s-app: kube-router + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: kube-router + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + priorityClassName: system-node-critical + serviceAccountName: kube-router + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: kube-router + image: {{ .Values.kubeRouter.image}} + args: + - --kubeconfig=/etc/kubernetes/kubeconfig + - --run-router=true + - --run-firewall=true + - --run-service-proxy=false + - --v=5 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + securityContext: + privileged: true + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /etc/kubernetes + readOnly: true + - name: install-cni + image: {{ .Values.kubeRouter.cniImage}} + command: ["/install-cni.sh"] + env: + - name: CNI_OLD_NAME + value: 10-flannel.conflist + - name: CNI_CONF_NAME + value: 10-kuberouter.conflist + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-router-config + key: cni-conf.json + volumeMounts: + - name: cni-bin-dir + mountPath: /host/opt/cni/bin + - name: 
cni-conf-dir + mountPath: /host/etc/cni/net.d + volumes: + # Used by kube-router + - name: lib-modules + hostPath: + path: /lib/modules + - name: kubeconfig + configMap: + name: kubeconfig-in-cluster + # Used by install-cni + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-conf-dir + hostPath: + path: /etc/kubernetes/cni/net.d diff --git a/bootkube/resources/charts/kube-router/templates/service-account.yaml b/bootkube/resources/charts/kube-router/templates/service-account.yaml new file mode 100644 index 00000000..c3bcc40e --- /dev/null +++ b/bootkube/resources/charts/kube-router/templates/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system diff --git a/bootkube/resources/charts/kube-router/values.yaml b/bootkube/resources/charts/kube-router/values.yaml new file mode 100644 index 00000000..359fef23 --- /dev/null +++ b/bootkube/resources/charts/kube-router/values.yaml @@ -0,0 +1,4 @@ +kubeRouter: + image: cloudnativelabs/kube-router:v0.3.2 + cniImage: quay.io/coreos/flannel-cni:v0.3.0 + networkMTU: 1480 diff --git a/bootkube/resources/charts/kubelet.yaml b/bootkube/resources/charts/kubelet.yaml new file mode 100644 index 00000000..234b5c31 --- /dev/null +++ b/bootkube/resources/charts/kubelet.yaml @@ -0,0 +1,3 @@ +image: ${hyperkube_image} +clusterDNS: ${cluster_dns_service_ip} +clusterDomain: ${cluster_domain_suffix} diff --git a/bootkube/resources/charts/kubelet/Chart.yaml b/bootkube/resources/charts/kubelet/Chart.yaml new file mode 100644 index 00000000..6361e2eb --- /dev/null +++ b/bootkube/resources/charts/kubelet/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: kubelet +description: A Helm chart for installing kubelet + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. 
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + diff --git a/bootkube/resources/charts/kubelet/templates/kubelet-ds.yaml b/bootkube/resources/charts/kubelet/templates/kubelet-ds.yaml new file mode 100644 index 00000000..f0733de5 --- /dev/null +++ b/bootkube/resources/charts/kubelet/templates/kubelet-ds.yaml @@ -0,0 +1,137 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kubelet + namespace: kube-system + labels: + tier: node + k8s-app: kubelet +spec: + selector: + matchLabels: + tier: node + k8s-app: kubelet + template: + metadata: + labels: + tier: node + k8s-app: kubelet + spec: + containers: + - name: kubelet + image: {{ .Values.image }} + command: + - /bin/sh + - -c + # File /etc/kubernetes/kubelet.env on each host has the `taints` and `labels` that bootstrap + # kubelet uses. This mechanism here makes sure that the same labels and taints are used in + # this kubelet as well. 
+ args: + - | + echo "./hyperkube kubelet \ + --node-ip=$(HOST_IP) \ + --anonymous-auth=false \ + --authentication-token-webhook \ + --authorization-mode=Webhook \ + --client-ca-file=/etc/kubernetes/ca.crt \ + --cluster_dns={{ .Values.clusterDNS }} \ + --cluster_domain={{ .Values.clusterDomain }} \ + --cni-conf-dir=/etc/kubernetes/cni/net.d \ + --config=/etc/kubernetes/kubelet.config \ + --kubeconfig=/etc/kubernetes/kubeconfig \ + --lock-file=/var/run/lock/kubelet.lock \ + --network-plugin=cni \ + --pod-manifest-path=/etc/kubernetes/manifests \ + --read-only-port=0 \ + --volume-plugin-dir=/var/lib/kubelet/volumeplugins \ + --node-labels=$(grep NODE_LABELS /etc/kubernetes/kubelet.env | cut -d'"' -f2) \ + --register-with-taints=$(grep NODE_TAINTS /etc/kubernetes/kubelet.env | cut -d'"' -f2) + " | tee /tmp/start-kubelet.sh && + chmod +x /tmp/start-kubelet.sh && + /tmp/start-kubelet.sh + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/cni + name: coreos-var-lib-cni + readOnly: false + - mountPath: /var/lib/calico + name: coreos-var-lib-calico + readOnly: true + - mountPath: /opt/cni/bin + name: coreos-opt-cni-bin + readOnly: true + # TODO check if this is needed + - name: dev + mountPath: /dev + - name: run + mountPath: /run + - name: sys + mountPath: /sys + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + - name: var-lib-docker + mountPath: /var/lib/docker + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + # Logs from the kubelet pods + - name: logs + mountPath: /var/log/pods + # This is mounted from host to make sure that the kubelet showcases OS as Flatcar and not + # Debian from the hyperkube image. + - name: os-release + mountPath: /etc/os-release + readOnly: true + hostNetwork: true + hostPID: true + # Tolerate all the taints. This ensures that the pod runs on all the nodes. 
+ # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: + - operator: "Exists" + volumes: + - name: coreos-var-lib-cni + hostPath: + path: /var/lib/cni + - name: coreos-var-lib-calico + hostPath: + path: /var/lib/calico + - name: coreos-opt-cni-bin + hostPath: + path: /opt/cni/bin + - name: dev + hostPath: + path: /dev + - name: run + hostPath: + path: /run + - name: sys + hostPath: + path: /sys + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: var-lib-docker + hostPath: + path: /var/lib/docker + - name: var-lib-kubelet + hostPath: + path: /var/lib/kubelet + - name: logs + hostPath: + path: /var/log/pods + - name: os-release + hostPath: + path: /usr/lib/os-release + type: File + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/bootkube/resources/charts/kubelet/values.yaml b/bootkube/resources/charts/kubelet/values.yaml new file mode 100644 index 00000000..84b74d77 --- /dev/null +++ b/bootkube/resources/charts/kubelet/values.yaml @@ -0,0 +1,3 @@ +image: k8s.gcr.io/hyperkube:v1.17.2 +clusterDNS: 10.0.0.10 +clusterDomain: cluster.local diff --git a/bootkube/resources/charts/kubernetes.yaml b/bootkube/resources/charts/kubernetes.yaml new file mode 100644 index 00000000..7b3528e5 --- /dev/null +++ b/bootkube/resources/charts/kubernetes.yaml @@ -0,0 +1,43 @@ +apiserver: + apiserverKey: ${apiserver_key} + apiserverCert: ${apiserver_cert} + serviceAccountPub: ${serviceaccount_pub} + caCert: ${ca_cert} + etcdClientCaCert: ${etcd_ca_cert} + etcdClientCert: ${etcd_client_cert} + etcdClientKey: ${etcd_client_key} + aggregationCaCert: ${aggregation_ca_cert} + aggregationClientCert: ${aggregation_client_cert} + aggregationClientKey: ${aggregation_client_key} + image: ${hyperkube_image} + cloudProvider: ${cloud_provider} + etcdServers: ${etcd_servers} + enableAggregation: ${enable_aggregation} + serviceCIDR: ${service_cidr} + trustedCertsDir: ${trusted_certs_dir} +controllerManager: + 
serviceAccountKey: ${serviceaccount_key} + caCert: ${ca_cert} + caKey: ${ca_key} + image: ${hyperkube_image} + cloudProvider: ${cloud_provider} + serviceCIDR: ${service_cidr} + podCIDR: ${pod_cidr} + controlPlaneReplicas: ${control_plane_replicas} + trustedCertsDir: ${trusted_certs_dir} +kubeProxy: + image: ${hyperkube_image} + podCIDR: ${pod_cidr} + trustedCertsDir: ${trusted_certs_dir} +kubeScheduler: + image: ${hyperkube_image} + controlPlaneReplicas: ${control_plane_replicas} +kubeConfigInCluster: + server: ${server} +podCheckpointer: + image: ${pod_checkpointer_image} +coredns: + clusterDomainSuffix: ${cluster_domain_suffix} + controlPlaneReplicas: ${control_plane_replicas} + image: ${coredns_image} + clusterIP: ${cluster_dns_service_ip} diff --git a/bootkube/resources/charts/kubernetes/.helmignore b/bootkube/resources/charts/kubernetes/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/bootkube/resources/charts/kubernetes/Chart.yaml b/bootkube/resources/charts/kubernetes/Chart.yaml new file mode 100644 index 00000000..84958e6c --- /dev/null +++ b/bootkube/resources/charts/kubernetes/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: kubernetes +description: A Helm chart for installing Kubernetes control plane components + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role-binding.yaml new file mode 100644 index 00000000..790aa805 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role-binding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:coredns + labels: + kubernetes.io/bootstrapping: rbac-defaults + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: + - kind: ServiceAccount + name: coredns + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role.yaml new file mode 100644 index 00000000..cf239280 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/coredns-cluster-role.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:coredns + labels: + kubernetes.io/bootstrapping: rbac-defaults +rules: + - apiGroups: [""] + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes + verbs: + - get diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-config.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-config.yaml new file mode 100644 index 00000000..9e9b294b --- /dev/null 
+++ b/bootkube/resources/charts/kubernetes/templates/coredns-config.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + log . { + class error + } + kubernetes {{ .Values.coredns.clusterDomainSuffix }} in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-deployment.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-deployment.yaml new file mode 100644 index 00000000..396d9786 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/coredns-deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: coredns + kubernetes.io/name: "CoreDNS" +spec: + replicas: {{ .Values.coredns.controlPlaneReplicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + tier: control-plane + k8s-app: coredns + template: + metadata: + labels: + tier: control-plane + k8s-app: coredns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: tier + operator: In + values: + - control-plane + - key: k8s-app + operator: In + values: + - coredns + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: coredns + image: {{ .Values.coredns.image }} + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - 
name: config + mountPath: /etc/coredns + readOnly: true + ports: + - name: dns + protocol: UDP + containerPort: 53 + - name: dns-tcp + protocol: TCP + containerPort: 53 + - name: metrics + protocol: TCP + containerPort: 9153 + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + dnsPolicy: Default + volumes: + - name: config + configMap: + name: coredns + items: + - key: Corefile + path: Corefile diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-service-account.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-service-account.yaml new file mode 100644 index 00000000..3da25b86 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/coredns-service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/coredns-service.yaml b/bootkube/resources/charts/kubernetes/templates/coredns-service.yaml new file mode 100644 index 00000000..beaf3972 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/coredns-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: coredns + namespace: kube-system + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + labels: + k8s-app: coredns + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: coredns + clusterIP: {{ .Values.coredns.clusterIP }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/bootkube/resources/charts/kubernetes/templates/kube-apiserver-role-binding.yaml 
b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-role-binding.yaml new file mode 100644 index 00000000..625a083e --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-apiserver +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kube-apiserver + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/kube-apiserver-sa.yaml b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-sa.yaml new file mode 100644 index 00000000..dbad83dd --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: kube-system + name: kube-apiserver diff --git a/bootkube/resources/charts/kubernetes/templates/kube-apiserver-secret.yaml b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-secret.yaml new file mode 100644 index 00000000..ba8c141a --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-apiserver-secret.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kube-apiserver + namespace: kube-system +type: Opaque +data: + apiserver.key: "{{ .Values.apiserver.apiserverKey }}" + apiserver.crt: "{{ .Values.apiserver.apiserverCert }}" + service-account.pub: "{{ .Values.apiserver.serviceAccountPub }}" + ca.crt: "{{ .Values.apiserver.caCert }}" + etcd-client-ca.crt: "{{ .Values.apiserver.etcdClientCaCert }}" + etcd-client.crt: "{{ .Values.apiserver.etcdClientCert }}" + etcd-client.key: "{{ .Values.apiserver.etcdClientKey }}" + aggregation-ca.crt: "{{ .Values.apiserver.aggregationCaCert }}" + aggregation-client.crt: "{{ .Values.apiserver.aggregationClientCert }}" + aggregation-client.key: "{{ .Values.apiserver.aggregationClientKey }}" \ No newline at end of 
file diff --git a/bootkube/resources/charts/kubernetes/templates/kube-apiserver.yaml b/bootkube/resources/charts/kubernetes/templates/kube-apiserver.yaml new file mode 100644 index 00000000..25a3d39b --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-apiserver.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-apiserver + namespace: kube-system + labels: + tier: control-plane + k8s-app: kube-apiserver +spec: + selector: + matchLabels: + tier: control-plane + k8s-app: kube-apiserver + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + tier: control-plane + k8s-app: kube-apiserver + annotations: + checkpointer.alpha.coreos.com/checkpoint: "true" + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + nodeSelector: + node.kubernetes.io/master: "" + priorityClassName: system-cluster-critical + serviceAccountName: kube-apiserver + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: kube-apiserver + image: {{ .Values.apiserver.image }} + command: + - /hyperkube + - kube-apiserver + - --advertise-address=$(POD_IP) + - --allow-privileged=true + - --anonymous-auth=false + - --authorization-mode=RBAC + - --bind-address=0.0.0.0 + - --client-ca-file=/etc/kubernetes/secrets/ca.crt + - --cloud-provider={{ .Values.apiserver.cloudProvider }} + - --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodSecurityPolicy + - --etcd-cafile=/etc/kubernetes/secrets/etcd-client-ca.crt + - --etcd-certfile=/etc/kubernetes/secrets/etcd-client.crt + - --etcd-keyfile=/etc/kubernetes/secrets/etcd-client.key + - --etcd-servers={{ .Values.apiserver.etcdServers}} + - --insecure-port=0 + - 
--kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt + - --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --secure-port=6443 + - --service-account-key-file=/etc/kubernetes/secrets/service-account.pub + - --service-cluster-ip-range={{ .Values.apiserver.serviceCIDR }} + - --storage-backend=etcd3 + - --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt + - --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key + {{- if .Values.apiserver.enableAggregation }} + - --proxy-client-cert-file=/etc/kubernetes/secrets/aggregation-client.crt + - --proxy-client-key-file=/etc/kubernetes/secrets/aggregation-client.key + - --requestheader-client-ca-file=/etc/kubernetes/secrets/aggregation-ca.crt + - --requestheader-extra-headers-prefix=X-Remote-Extra- + - --requestheader-group-headers=X-Remote-Group + - --requestheader-username-headers=X-Remote-User + {{- end }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + volumeMounts: + - name: secrets + mountPath: /etc/kubernetes/secrets + readOnly: true + - name: ssl-certs-host + mountPath: /etc/ssl/certs + readOnly: true + securityContext: + runAsNonRoot: true + runAsUser: 65534 + volumes: + - name: secrets + secret: + secretName: kube-apiserver + - name: ssl-certs-host + hostPath: + path: {{ .Values.apiserver.trustedCertsDir }} diff --git a/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-disruption.yaml b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-disruption.yaml new file mode 100644 index 00000000..1d1d0235 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-disruption.yaml @@ -0,0 +1,11 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: kube-controller-manager + namespace: kube-system +spec: + minAvailable: 1 + selector: + matchLabels: + tier: control-plane + k8s-app: kube-controller-manager 
diff --git a/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-role-binding.yaml new file mode 100644 index 00000000..9f0b59e8 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-controller-manager +subjects: +- kind: ServiceAccount + name: kube-controller-manager + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-sa.yaml b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-sa.yaml new file mode 100644 index 00000000..bb8f0aab --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: kube-system + name: kube-controller-manager diff --git a/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-secret.yaml b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-secret.yaml new file mode 100644 index 00000000..6372fcdb --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager-secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: kube-controller-manager + namespace: kube-system +type: Opaque +data: + service-account.key: "{{ .Values.controllerManager.serviceAccountKey }}" + ca.crt: "{{ .Values.controllerManager.caCert }}" + ca.key: "{{ .Values.controllerManager.caKey }}" + diff --git a/bootkube/resources/charts/kubernetes/templates/kube-controller-manager.yaml b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager.yaml new file mode 100644 index 00000000..6ee56faf --- /dev/null +++ 
b/bootkube/resources/charts/kubernetes/templates/kube-controller-manager.yaml @@ -0,0 +1,96 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-controller-manager + namespace: kube-system + labels: + tier: control-plane + k8s-app: kube-controller-manager +spec: + replicas: {{ .Values.controllerManager.controlPlaneReplicas }} + selector: + matchLabels: + tier: control-plane + k8s-app: kube-controller-manager + template: + metadata: + labels: + tier: control-plane + k8s-app: kube-controller-manager + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: tier + operator: In + values: + - control-plane + - key: k8s-app + operator: In + values: + - kube-controller-manager + topologyKey: kubernetes.io/hostname + nodeSelector: + node.kubernetes.io/master: "" + priorityClassName: system-cluster-critical + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: kube-controller-manager + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: kube-controller-manager + image: {{ .Values.controllerManager.image }} + command: + - ./hyperkube + - kube-controller-manager + - --use-service-account-credentials + - --allocate-node-cidrs=true + - --cloud-provider={{ .Values.controllerManager.cloudProvider }} + - --cluster-cidr={{ .Values.controllerManager.podCIDR }} + - --service-cluster-ip-range={{ .Values.controllerManager.serviceCIDR }} + - --cluster-signing-cert-file=/etc/kubernetes/secrets/ca.crt + - --cluster-signing-key-file=/etc/kubernetes/secrets/ca.key + - --configure-cloud-routes=false + - --leader-elect=true + - --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins + - --pod-eviction-timeout=1m + - --root-ca-file=/etc/kubernetes/secrets/ca.crt + - 
--service-account-private-key-file=/etc/kubernetes/secrets/service-account.key + livenessProbe: + httpGet: + scheme: HTTPS + path: /healthz + port: 10257 + initialDelaySeconds: 15 + timeoutSeconds: 15 + volumeMounts: + - name: secrets + mountPath: /etc/kubernetes/secrets + readOnly: true + - name: volumeplugins + mountPath: /var/lib/kubelet/volumeplugins + readOnly: true + - name: ssl-host + mountPath: /etc/ssl/certs + readOnly: true + volumes: + - name: secrets + secret: + secretName: kube-controller-manager + - name: ssl-host + hostPath: + path: {{ .Values.controllerManager.trustedCertsDir }} + - name: volumeplugins + hostPath: + path: /var/lib/kubelet/volumeplugins + dnsPolicy: Default # Don't use cluster DNS. diff --git a/bootkube/resources/charts/kubernetes/templates/kube-proxy-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kube-proxy-role-binding.yaml new file mode 100644 index 00000000..0ae0e12a --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-proxy-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-proxy +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-proxier # Automatically created system role. 
+subjects: +- kind: ServiceAccount + name: kube-proxy + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/kube-proxy-sa.yaml b/bootkube/resources/charts/kubernetes/templates/kube-proxy-sa.yaml new file mode 100644 index 00000000..651d76f3 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-proxy-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: kube-system + name: kube-proxy diff --git a/bootkube/resources/charts/kubernetes/templates/kube-proxy.yaml b/bootkube/resources/charts/kubernetes/templates/kube-proxy.yaml new file mode 100644 index 00000000..9053fed1 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-proxy.yaml @@ -0,0 +1,76 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-proxy + namespace: kube-system + labels: + tier: node + k8s-app: kube-proxy +spec: + selector: + matchLabels: + tier: node + k8s-app: kube-proxy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + tier: node + k8s-app: kube-proxy + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + priorityClassName: system-node-critical + serviceAccountName: kube-proxy + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + containers: + - name: kube-proxy + image: {{ .Values.kubeProxy.image }} + command: + - ./hyperkube + - kube-proxy + - --cluster-cidr={{ .Values.kubeProxy.podCIDR }} + - --hostname-override=$(NODE_NAME) + - --kubeconfig=/etc/kubernetes/kubeconfig + - --proxy-mode=iptables + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + livenessProbe: + httpGet: + path: /healthz + port: 10256 + initialDelaySeconds: 15 + timeoutSeconds: 15 + securityContext: + privileged: true + volumeMounts: + - name: kubeconfig + mountPath: /etc/kubernetes + readOnly: true + - name: lib-modules + mountPath: 
/lib/modules + readOnly: true + - name: ssl-certs-host + mountPath: /etc/ssl/certs + readOnly: true + volumes: + - name: kubeconfig + configMap: + name: kubeconfig-in-cluster + - name: lib-modules + hostPath: + path: /lib/modules + - name: ssl-certs-host + hostPath: + path: {{ .Values.kubeProxy.trustedCertsDir }} diff --git a/bootkube/resources/charts/kubernetes/templates/kube-scheduler-disruption.yaml b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-disruption.yaml new file mode 100644 index 00000000..11af3faa --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-disruption.yaml @@ -0,0 +1,11 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: kube-scheduler + namespace: kube-system +spec: + minAvailable: 1 + selector: + matchLabels: + tier: control-plane + k8s-app: kube-scheduler diff --git a/bootkube/resources/charts/kubernetes/templates/kube-scheduler-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-role-binding.yaml new file mode 100644 index 00000000..b64e98f9 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kube-scheduler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-scheduler +subjects: +- kind: ServiceAccount + name: kube-scheduler + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/kube-scheduler-sa.yaml b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-sa.yaml new file mode 100644 index 00000000..ef959ba9 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: kube-system + name: kube-scheduler diff --git a/bootkube/resources/charts/kubernetes/templates/kube-scheduler-volume-scheduler-role-binding.yaml 
b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-volume-scheduler-role-binding.yaml new file mode 100644 index 00000000..fee1d492 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-scheduler-volume-scheduler-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: volume-scheduler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:volume-scheduler +subjects: +- kind: ServiceAccount + name: kube-scheduler + namespace: kube-system + diff --git a/bootkube/resources/charts/kubernetes/templates/kube-scheduler.yaml b/bootkube/resources/charts/kubernetes/templates/kube-scheduler.yaml new file mode 100644 index 00000000..f20d3c79 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kube-scheduler.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-scheduler + namespace: kube-system + labels: + tier: control-plane + k8s-app: kube-scheduler +spec: + replicas: {{ .Values.kubeScheduler.controlPlaneReplicas }} + selector: + matchLabels: + tier: control-plane + k8s-app: kube-scheduler + template: + metadata: + labels: + tier: control-plane + k8s-app: kube-scheduler + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: tier + operator: In + values: + - control-plane + - key: k8s-app + operator: In + values: + - kube-scheduler + topologyKey: kubernetes.io/hostname + nodeSelector: + node.kubernetes.io/master: "" + priorityClassName: system-cluster-critical + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: kube-scheduler + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: kube-scheduler + image: "{{ 
.Values.kubeScheduler.image }}" + command: + - ./hyperkube + - kube-scheduler + - --leader-elect=true + livenessProbe: + httpGet: + scheme: HTTPS + path: /healthz + port: 10259 + initialDelaySeconds: 15 + timeoutSeconds: 15 diff --git a/bootkube/resources/charts/kubernetes/templates/kubeconfig-in-cluster.yaml b/bootkube/resources/charts/kubernetes/templates/kubeconfig-in-cluster.yaml new file mode 100644 index 00000000..3e2eab3e --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kubeconfig-in-cluster.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubeconfig-in-cluster + namespace: kube-system +data: + kubeconfig: | + apiVersion: v1 + clusters: + - name: local + cluster: + # kubeconfig-in-cluster is for control plane components that must reach + # kube-apiserver before service IPs are available (e.g.10.3.0.1) + server: {{ .Values.kubeConfigInCluster.server }} + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + users: + - name: service-account + user: + # Use service account token + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + contexts: + - context: + cluster: local + user: service-account diff --git a/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role-binding.yaml new file mode 100644 index 00000000..7e736ff9 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubelet-delete +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubelet-delete +subjects: +- kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io diff --git a/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role.yaml 
b/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role.yaml new file mode 100644 index 00000000..b423ff73 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kubelet-delete-cluster-role.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubelet-delete +rules: + - apiGroups: [""] + resources: + - nodes + verbs: + - delete diff --git a/bootkube/resources/charts/kubernetes/templates/kubelet-nodes-cluster-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kubelet-nodes-cluster-role-binding.yaml new file mode 100644 index 00000000..5dfcc170 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kubelet-nodes-cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-nodes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node +subjects: +- kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io diff --git a/bootkube/resources/charts/kubernetes/templates/kubelet-pod-checkpointer-psp-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/kubelet-pod-checkpointer-psp-role-binding.yaml new file mode 100644 index 00000000..ce61d371 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/kubelet-pod-checkpointer-psp-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubelet-pod-checkpointer-psp + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-checkpointer-psp +subjects: +- kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role-binding.yaml new file mode 100644 index 00000000..6cd7b42f --- /dev/null +++ 
b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pod-checkpointer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pod-checkpointer +subjects: +- kind: ServiceAccount + name: pod-checkpointer + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role.yaml new file mode 100644 index 00000000..a7631389 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-cluster-role.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pod-checkpointer +rules: + - apiGroups: [""] + resources: + - nodes + - nodes/proxy + verbs: + - get diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-psp.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-psp.yaml new file mode 100644 index 00000000..7f70ee9b --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-psp.yaml @@ -0,0 +1,33 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + # https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order + # If the pod must be defaulted or mutated, the first PodSecurityPolicy (ordered by name) to allow the pod is selected. + name: pod-checkpointer-restricted + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' +spec: + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'hostPath' + - 'secret' + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false + allowedHostPaths: + - pathPrefix: "/etc/kubernetes" + - pathPrefix: "/var/run" + - pathPrefix: "/etc/checkpointer" diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role-binding.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role-binding.yaml new file mode 100644 index 00000000..9d47ee82 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role-binding.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-checkpointer + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-checkpointer +subjects: +- kind: ServiceAccount + name: pod-checkpointer + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-checkpointer-psp + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-checkpointer-psp +subjects: +- kind: ServiceAccount + name: pod-checkpointer + namespace: kube-system diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role.yaml new file mode 100644 index 00000000..6300e3ef --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-checkpointer + namespace: kube-system +rules: +- apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] +- apiGroups: [""] # "" indicates the core API group + resources: ["secrets", "configmaps"] + verbs: ["get"] +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-checkpointer-psp + namespace: kube-system +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - pod-checkpointer-restricted diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-sa.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-sa.yaml new file mode 100644 index 00000000..e7692800 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: kube-system + name: pod-checkpointer diff --git a/bootkube/resources/charts/kubernetes/templates/pod-checkpointer.yaml b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer.yaml new file mode 100644 index 00000000..0064d6cf --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/pod-checkpointer.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pod-checkpointer + namespace: kube-system + labels: + tier: control-plane + k8s-app: pod-checkpointer +spec: + selector: + matchLabels: + tier: control-plane + k8s-app: pod-checkpointer + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + tier: control-plane + k8s-app: pod-checkpointer + annotations: + checkpointer.alpha.coreos.com/checkpoint: "true" + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + hostNetwork: true + nodeSelector: + node.kubernetes.io/master: "" + priorityClassName: system-node-critical + serviceAccountName: pod-checkpointer + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: pod-checkpointer + image: {{ .Values.podCheckpointer.image }} + command: + - /checkpoint + - --lock-file=/var/run/lock/pod-checkpointer.lock + - --kubeconfig=/etc/checkpointer/kubeconfig + env: + - name: NODE_NAME + 
valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: kubeconfig + mountPath: /etc/checkpointer + - name: etc-kubernetes + mountPath: /etc/kubernetes + - name: var-run + mountPath: /var/run + volumes: + - name: kubeconfig + configMap: + name: kubeconfig-in-cluster + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: var-run + hostPath: + path: /var/run diff --git a/bootkube/resources/charts/kubernetes/templates/psp-privileged.yaml b/bootkube/resources/charts/kubernetes/templates/psp-privileged.yaml new file mode 100644 index 00000000..d4c954b3 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/psp-privileged.yaml @@ -0,0 +1,65 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' +spec: + privileged: true + allowPrivilegeEscalation: true + allowedCapabilities: + - '*' + volumes: + - '*' + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + hostIPC: true + hostPID: true + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: privileged-psp +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - privileged +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: privileged-psp-cluster-admins +roleRef: + kind: ClusterRole + name: privileged-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: Group + name: system:masters + apiGroup: rbac.authorization.k8s.io +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: privileged-psp-kubesystem + namespace: kube-system 
+roleRef: + kind: ClusterRole + name: privileged-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: Group + name: system:serviceaccounts:kube-system + apiGroup: rbac.authorization.k8s.io diff --git a/bootkube/resources/charts/kubernetes/templates/psp-restricted.yaml b/bootkube/resources/charts/kubernetes/templates/psp-restricted.yaml new file mode 100644 index 00000000..dc99835d --- /dev/null +++ b/bootkube/resources/charts/kubernetes/templates/psp-restricted.yaml @@ -0,0 +1,72 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: restricted + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + requiredDropCapabilities: + - KILL + - MKNOD + - SETUID + - SETGID + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that persistentVolumes set up by the cluster admin are safe to use. + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: restricted-psp +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - restricted +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: restricted-psp-system-authenticated +roleRef: + kind: ClusterRole + name: restricted-psp + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io diff --git a/bootkube/resources/charts/kubernetes/values.yaml b/bootkube/resources/charts/kubernetes/values.yaml new file mode 100644 index 00000000..d628cc66 --- /dev/null +++ b/bootkube/resources/charts/kubernetes/values.yaml @@ -0,0 +1,43 @@ +apiserver: + apiserverKey: + apiserverCert: + serviceAccountPub: + caCert: + etcdClientCaCert: + etcdClientCert: + etcdClientKey: + aggregationCaCert: + aggregationClientCert: + aggregationClientKey: + image: k8s.gcr.io/hyperkube:v1.17.2 + cloudProvider: + etcdServers: + aggregationFlags: + serviceCIDR: 10.0.0.0/24 + trustedCertsDir: /usr/share/ca-certificates +controllerManager: + serviceAccountKey: + caCert: + caKey: + image: k8s.gcr.io/hyperkube:v1.17.2 + cloudProvider: + serviceCIDR: 10.0.0.0/24 + podCIDR: 10.2.0.0/16 + controlPlaneReplicas: 1 + trustedCertsDir: /usr/share/ca-certificates +kubeProxy: + image: k8s.gcr.io/hyperkube:v1.17.2 + podCIDR: 10.2.0.0/16 + trustedCertsDir: /usr/share/ca-certificates +kubeScheduler: + image: k8s.gcr.io/hyperkube:v1.17.2 + controlPlaneReplicas: 1 +kubeConfigInCluster: + server: +podCheckpointer: + image: kinvolk/pod-checkpointer:83e25e5968391b9eb342042c435d1b3eeddb2be1 +coredns: + clusterDomainSuffix: cluster.local + controlPlaneReplicas: 1 + image: coredns/coredns:coredns-amd64 + clusterIP: 10.0.0.10 diff --git a/bootkube/resources/kubeconfig-admin b/bootkube/resources/kubeconfig-admin new file mode 100644 
index 00000000..8ed410b7 --- /dev/null +++ b/bootkube/resources/kubeconfig-admin @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Config +clusters: +- name: ${name}-cluster + cluster: + server: ${server} + certificate-authority-data: ${ca_cert} +users: +- name: ${name}-user + user: + client-certificate-data: ${kubelet_cert} + client-key-data: ${kubelet_key} +current-context: ${name}-context +contexts: +- name: ${name}-context + context: + cluster: ${name}-cluster + user: ${name}-user diff --git a/bootkube/resources/kubeconfig-kubelet b/bootkube/resources/kubeconfig-kubelet new file mode 100644 index 00000000..5d8fb0c3 --- /dev/null +++ b/bootkube/resources/kubeconfig-kubelet @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + server: ${server} + certificate-authority-data: ${ca_cert} +users: +- name: kubelet + user: + client-certificate-data: ${kubelet_cert} + client-key-data: ${kubelet_key} +contexts: +- context: + cluster: local + user: kubelet diff --git a/bootkube/terraform.tfvars.example b/bootkube/terraform.tfvars.example new file mode 100644 index 00000000..b7a0f13d --- /dev/null +++ b/bootkube/terraform.tfvars.example @@ -0,0 +1,5 @@ +cluster_name = "example" +api_servers = ["node1.example.com"] +etcd_servers = ["node1.example.com"] +asset_dir = "/home/core/mycluster" +networking = "flannel" diff --git a/bootkube/tls-aggregation.tf b/bootkube/tls-aggregation.tf new file mode 100644 index 00000000..78eb8d68 --- /dev/null +++ b/bootkube/tls-aggregation.tf @@ -0,0 +1,105 @@ +# NOTE: Across this module, the following workaround is used: +# `"${var.some_var == "condition" ? join(" ", tls_private_key.aggregation-ca.*.private_key_pem) : ""}"` +# Due to https://github.com/hashicorp/hil/issues/50, both sides of conditions +# are evaluated, until one of them is discarded. When a `count` is used resources +# can be referenced as lists with the `.*` notation, and arrays are allowed to be +# empty. 
The `join()` interpolation function is then used to cast them back to +# a string. Since `count` can only be 0 or 1, the returned value is either empty +# (and discarded anyways) or the desired value. + +# Kubernetes Aggregation CA (i.e. front-proxy-ca) +# Files: tls/{aggregation-ca.crt,aggregation-ca.key} + +resource "tls_private_key" "aggregation-ca" { + count = var.enable_aggregation == true ? 1 : 0 + + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_self_signed_cert" "aggregation-ca" { + count = var.enable_aggregation == true ? 1 : 0 + + key_algorithm = tls_private_key.aggregation-ca[0].algorithm + private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem + + subject { + common_name = "kubernetes-front-proxy-ca" + } + + is_ca_certificate = true + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "cert_signing", + ] +} + +resource "local_file" "aggregation-ca-key" { + count = var.enable_aggregation == true ? 1 : 0 + + content = tls_private_key.aggregation-ca[0].private_key_pem + filename = "${var.asset_dir}/tls/aggregation-ca.key" +} + +resource "local_file" "aggregation-ca-crt" { + count = var.enable_aggregation == true ? 1 : 0 + + content = tls_self_signed_cert.aggregation-ca[0].cert_pem + filename = "${var.asset_dir}/tls/aggregation-ca.crt" +} + +# Kubernetes apiserver (i.e. front-proxy-client) +# Files: tls/{aggregation-client.crt,aggregation-client.key} + +resource "tls_private_key" "aggregation-client" { + count = var.enable_aggregation == true ? 1 : 0 + + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "aggregation-client" { + count = var.enable_aggregation == true ? 
1 : 0 + + key_algorithm = tls_private_key.aggregation-client[0].algorithm + private_key_pem = tls_private_key.aggregation-client[0].private_key_pem + + subject { + common_name = "kube-apiserver" + } +} + +resource "tls_locally_signed_cert" "aggregation-client" { + count = var.enable_aggregation == true ? 1 : 0 + + cert_request_pem = tls_cert_request.aggregation-client[0].cert_request_pem + + ca_key_algorithm = tls_self_signed_cert.aggregation-ca[0].key_algorithm + ca_private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem + ca_cert_pem = tls_self_signed_cert.aggregation-ca[0].cert_pem + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "client_auth", + ] +} + +resource "local_file" "aggregation-client-key" { + count = var.enable_aggregation == true ? 1 : 0 + + content = tls_private_key.aggregation-client[0].private_key_pem + filename = "${var.asset_dir}/tls/aggregation-client.key" +} + +resource "local_file" "aggregation-client-crt" { + count = var.enable_aggregation == true ? 
1 : 0 + + content = tls_locally_signed_cert.aggregation-client[0].cert_pem + filename = "${var.asset_dir}/tls/aggregation-client.crt" +} diff --git a/bootkube/tls-etcd.tf b/bootkube/tls-etcd.tf new file mode 100644 index 00000000..89090986 --- /dev/null +++ b/bootkube/tls-etcd.tf @@ -0,0 +1,203 @@ +# etcd-ca.crt +resource "local_file" "etcd_ca_crt" { + content = tls_self_signed_cert.etcd-ca.cert_pem + filename = "${var.asset_dir}/tls/etcd-ca.crt" +} + +# etcd-ca.key +resource "local_file" "etcd_ca_key" { + content = tls_private_key.etcd-ca.private_key_pem + filename = "${var.asset_dir}/tls/etcd-ca.key" +} + +# etcd-client-ca.crt +resource "local_file" "etcd_client_ca_crt" { + content = tls_self_signed_cert.etcd-ca.cert_pem + filename = "${var.asset_dir}/tls/etcd-client-ca.crt" +} + +# etcd-client.crt +resource "local_file" "etcd_client_crt" { + content = tls_locally_signed_cert.client.cert_pem + filename = "${var.asset_dir}/tls/etcd-client.crt" +} + +# etcd-client.key +resource "local_file" "etcd_client_key" { + content = tls_private_key.client.private_key_pem + filename = "${var.asset_dir}/tls/etcd-client.key" +} + +# server-ca.crt +resource "local_file" "etcd_server_ca_crt" { + content = tls_self_signed_cert.etcd-ca.cert_pem + filename = "${var.asset_dir}/tls/etcd/server-ca.crt" +} + +# server.crt +resource "local_file" "etcd_server_crt" { + content = tls_locally_signed_cert.server.cert_pem + filename = "${var.asset_dir}/tls/etcd/server.crt" +} + +# server.key +resource "local_file" "etcd_server_key" { + content = tls_private_key.server.private_key_pem + filename = "${var.asset_dir}/tls/etcd/server.key" +} + +# peer-ca.crt +resource "local_file" "etcd_peer_ca_crt" { + content = tls_self_signed_cert.etcd-ca.cert_pem + filename = "${var.asset_dir}/tls/etcd/peer-ca.crt" +} + +# peer.crt +resource "local_file" "etcd_peer_crt" { + content = tls_locally_signed_cert.peer.cert_pem + filename = "${var.asset_dir}/tls/etcd/peer.crt" +} + +# peer.key +resource "local_file" 
"etcd_peer_key" { + content = tls_private_key.peer.private_key_pem + filename = "${var.asset_dir}/tls/etcd/peer.key" +} + +# certificates and keys + +resource "tls_private_key" "etcd-ca" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_self_signed_cert" "etcd-ca" { + key_algorithm = tls_private_key.etcd-ca.algorithm + private_key_pem = tls_private_key.etcd-ca.private_key_pem + + subject { + common_name = "etcd-ca" + organization = "etcd" + } + + is_ca_certificate = true + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "cert_signing", + ] +} + +# client certs are used for client (apiserver, locksmith, etcd-operator) +# to etcd communication +resource "tls_private_key" "client" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "client" { + key_algorithm = tls_private_key.client.algorithm + private_key_pem = tls_private_key.client.private_key_pem + + subject { + common_name = "etcd-client" + organization = "etcd" + } + + ip_addresses = [ + "127.0.0.1", + ] + + dns_names = concat(var.etcd_servers, ["localhost"]) +} + +resource "tls_locally_signed_cert" "client" { + cert_request_pem = tls_cert_request.client.cert_request_pem + + ca_key_algorithm = join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm) + ca_private_key_pem = join(" ", tls_private_key.etcd-ca.*.private_key_pem) + ca_cert_pem = join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem) + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + "client_auth", + ] +} + +resource "tls_private_key" "server" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "server" { + key_algorithm = tls_private_key.server.algorithm + private_key_pem = tls_private_key.server.private_key_pem + + subject { + common_name = "etcd-server" + organization = "etcd" + } + + ip_addresses = [ + "127.0.0.1", + ] + + 
dns_names = concat(var.etcd_servers, ["localhost"]) +} + +resource "tls_locally_signed_cert" "server" { + cert_request_pem = tls_cert_request.server.cert_request_pem + + ca_key_algorithm = join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm) + ca_private_key_pem = join(" ", tls_private_key.etcd-ca.*.private_key_pem) + ca_cert_pem = join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem) + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + "client_auth", + ] +} + +resource "tls_private_key" "peer" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "peer" { + key_algorithm = tls_private_key.peer.algorithm + private_key_pem = tls_private_key.peer.private_key_pem + + subject { + common_name = "etcd-peer" + organization = "etcd" + } + + dns_names = var.etcd_servers +} + +resource "tls_locally_signed_cert" "peer" { + cert_request_pem = tls_cert_request.peer.cert_request_pem + + ca_key_algorithm = join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm) + ca_private_key_pem = join(" ", tls_private_key.etcd-ca.*.private_key_pem) + ca_cert_pem = join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem) + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + "client_auth", + ] +} diff --git a/bootkube/tls-k8s.tf b/bootkube/tls-k8s.tf new file mode 100644 index 00000000..b7fcd259 --- /dev/null +++ b/bootkube/tls-k8s.tf @@ -0,0 +1,194 @@ +# Kubernetes CA (tls/{ca.crt,ca.key}) + +resource "tls_private_key" "kube-ca" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_self_signed_cert" "kube-ca" { + key_algorithm = tls_private_key.kube-ca.algorithm + private_key_pem = tls_private_key.kube-ca.private_key_pem + + subject { + common_name = "kubernetes-ca" + organization = "bootkube" + } + + is_ca_certificate = true + validity_period_hours = var.certs_validity_period_hours + + 
allowed_uses = [ + "key_encipherment", + "digital_signature", + "cert_signing", + ] +} + +resource "local_file" "kube-ca-key" { + content = tls_private_key.kube-ca.private_key_pem + filename = "${var.asset_dir}/tls/ca.key" +} + +resource "local_file" "kube-ca-crt" { + content = tls_self_signed_cert.kube-ca.cert_pem + filename = "${var.asset_dir}/tls/ca.crt" +} + +# Kubernetes API Server (tls/{apiserver.key,apiserver.crt}) + +resource "tls_private_key" "apiserver" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "apiserver" { + key_algorithm = tls_private_key.apiserver.algorithm + private_key_pem = tls_private_key.apiserver.private_key_pem + + subject { + common_name = "kube-apiserver" + organization = "system:masters" + } + + dns_names = flatten([ + var.api_servers, + var.api_servers_external, + "kubernetes", + "kubernetes.default", + "kubernetes.default.svc", + "kubernetes.default.svc.${var.cluster_domain_suffix}", + ]) + + ip_addresses = concat([cidrhost(var.service_cidr, 1)], var.api_servers_ips) +} + +resource "tls_locally_signed_cert" "apiserver" { + cert_request_pem = tls_cert_request.apiserver.cert_request_pem + + ca_key_algorithm = tls_self_signed_cert.kube-ca.key_algorithm + ca_private_key_pem = tls_private_key.kube-ca.private_key_pem + ca_cert_pem = tls_self_signed_cert.kube-ca.cert_pem + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + "client_auth", + ] +} + +resource "local_file" "apiserver-key" { + content = tls_private_key.apiserver.private_key_pem + filename = "${var.asset_dir}/tls/apiserver.key" +} + +resource "local_file" "apiserver-crt" { + content = tls_locally_signed_cert.apiserver.cert_pem + filename = "${var.asset_dir}/tls/apiserver.crt" +} + +# Kubernetes Admin (tls/{admin.key,admin.crt}) + +resource "tls_private_key" "admin" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "admin" { + 
key_algorithm = tls_private_key.admin.algorithm + private_key_pem = tls_private_key.admin.private_key_pem + + subject { + common_name = "kubernetes-admin" + organization = "system:masters" + } +} + +resource "tls_locally_signed_cert" "admin" { + cert_request_pem = tls_cert_request.admin.cert_request_pem + + ca_key_algorithm = tls_self_signed_cert.kube-ca.key_algorithm + ca_private_key_pem = tls_private_key.kube-ca.private_key_pem + ca_cert_pem = tls_self_signed_cert.kube-ca.cert_pem + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "client_auth", + ] +} + +resource "local_file" "admin-key" { + content = tls_private_key.admin.private_key_pem + filename = "${var.asset_dir}/tls/admin.key" +} + +resource "local_file" "admin-crt" { + content = tls_locally_signed_cert.admin.cert_pem + filename = "${var.asset_dir}/tls/admin.crt" +} + +# Kubernete's Service Account (tls/{service-account.key,service-account.pub}) + +resource "tls_private_key" "service-account" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "local_file" "service-account-key" { + content = tls_private_key.service-account.private_key_pem + filename = "${var.asset_dir}/tls/service-account.key" +} + +resource "local_file" "service-account-crt" { + content = tls_private_key.service-account.public_key_pem + filename = "${var.asset_dir}/tls/service-account.pub" +} + +# Kubelet + +resource "tls_private_key" "kubelet" { + algorithm = "RSA" + rsa_bits = "2048" +} + +resource "tls_cert_request" "kubelet" { + key_algorithm = tls_private_key.kubelet.algorithm + private_key_pem = tls_private_key.kubelet.private_key_pem + + subject { + common_name = "kubelet" + organization = "system:nodes" + } +} + +resource "tls_locally_signed_cert" "kubelet" { + cert_request_pem = tls_cert_request.kubelet.cert_request_pem + + ca_key_algorithm = tls_self_signed_cert.kube-ca.key_algorithm + ca_private_key_pem = tls_private_key.kube-ca.private_key_pem + 
ca_cert_pem = tls_self_signed_cert.kube-ca.cert_pem + + validity_period_hours = var.certs_validity_period_hours + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + "client_auth", + ] +} + +resource "local_file" "kubelet-key" { + content = tls_private_key.kubelet.private_key_pem + filename = "${var.asset_dir}/tls/kubelet.key" +} + +resource "local_file" "kubelet-crt" { + content = tls_locally_signed_cert.kubelet.cert_pem + filename = "${var.asset_dir}/tls/kubelet.crt" +} diff --git a/bootkube/variables.tf b/bootkube/variables.tf new file mode 100644 index 00000000..a8f9c7db --- /dev/null +++ b/bootkube/variables.tf @@ -0,0 +1,141 @@ +variable "cluster_name" { + description = "Cluster name" + type = string +} + +variable "api_servers" { + description = "List of domain names used to reach kube-apiserver from within the cluster" + type = list(string) +} + +# When not set, the value of var.api_servers will be used. +variable "api_servers_external" { + description = "List of domain names used to reach kube-apiserver from an external network" + type = list(string) + default = [] +} + +variable "api_servers_ips" { + description = "List of additional IPv4 addresses to be included in the kube-apiserver TLS certificate" + type = list(string) + default = [] +} + +variable "etcd_servers" { + description = "List of domain names used to reach etcd servers." 
+ type = list(string) +} + +variable "asset_dir" { + description = "Path to a directory where generated assets should be placed (contains secrets)" + type = string +} + +variable "cloud_provider" { + description = "The provider for cloud services (empty string for no provider)" + type = string + default = "" +} + +variable "networking" { + description = "Choice of networking provider (flannel or calico or kube-router)" + type = string + default = "flannel" +} + +variable "network_mtu" { + description = "CNI interface MTU (only applies to calico and kube-router)" + type = number + default = 1500 +} + +variable "network_encapsulation" { + description = "Network encapsulation mode either ipip or vxlan (only applies to calico)" + type = string + default = "ipip" +} + +variable "network_ip_autodetection_method" { + description = "Method to autodetect the host IPv4 address (only applies to calico)" + type = string + default = "first-found" +} + +variable "pod_cidr" { + description = "CIDR IP range to assign Kubernetes pods" + type = string + default = "10.2.0.0/16" +} + +variable "service_cidr" { + description = <