From 96c06c06a8ae9ee9eca4fd89ef0d6efa222199b0 Mon Sep 17 00:00:00 2001
From: Dalton Hubble
Date: Tue, 2 Aug 2022 18:12:37 -0700
Subject: [PATCH] Migrate Flatcar Linux from Ignition spec v2.3.0 to v3.3.0

* Requires poseidon/ct v0.11+ and Flatcar Linux 3185.0.0+ (action required)
* Previously, Flatcar Linux configs were parsed as Container Linux Configs
  and rendered to Ignition v2.2.0 specs by poseidon/ct
* Flatcar Linux 3185.0.0+ supports Ignition v3.x specs, which are rendered
  from Butane Configs (as with Fedora CoreOS)
* poseidon/ct v0.11.0 adds support for the flatcar Butane Config variant,
  so Flatcar Linux can use Ignition v3.x

Rel:

* [Flatcar Support](https://flatcar-linux.org/docs/latest/provisioning/ignition/specification/#ignition-v3)
* [poseidon/ct support](https://github.com/poseidon/terraform-provider-ct/pull/131)
---
 cl/controller.yaml     | 192 -----------------------------------------
 workers/cl/worker.yaml | 115 ------------------------
 2 files changed, 307 deletions(-)
 delete mode 100644 cl/controller.yaml
 delete mode 100644 workers/cl/worker.yaml

diff --git a/cl/controller.yaml b/cl/controller.yaml
deleted file mode 100644
index e791acb..0000000
--- a/cl/controller.yaml
+++ /dev/null
@@ -1,192 +0,0 @@
----
-systemd:
-  units:
-    - name: etcd-member.service
-      enabled: true
-      dropins:
-        - name: 40-etcd-cluster.conf
-          contents: |
-            [Service]
-            Environment="ETCD_IMAGE_TAG=v3.4.12"
-            Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
-            Environment="RKT_RUN_ARGS=--insecure-options=image"
-            Environment="ETCD_NAME=${etcd_name}"
-            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
-            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
-            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
-            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
-            Environment="ETCD_LISTEN_METRICS_URLS=http://0.0.0.0:2381"
-            Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
-            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
-            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
-            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
-            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
-            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
-            Environment="ETCD_CLIENT_CERT_AUTH=true"
-            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
-            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
-            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
-            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
-    - name: docker.service
-      enabled: true
-    - name: locksmithd.service
-      mask: true
-    - name: wait-for-dns.service
-      enabled: true
-      contents: |
-        [Unit]
-        Description=Wait for DNS entries
-        Wants=systemd-resolved.service
-        Before=kubelet.service
-        [Service]
-        Type=oneshot
-        RemainAfterExit=true
-        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
-        [Install]
-        RequiredBy=kubelet.service
-        RequiredBy=etcd-member.service
-    - name: kubelet.service
-      enabled: true
-      contents: |
-        [Unit]
-        Description=Kubelet
-        Requires=docker.service
-        After=docker.service
-        Wants=rpc-statd.service
-        [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
-        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
-        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
-        ExecStartPre=/bin/mkdir -p /opt/cni/bin
-        ExecStartPre=/bin/mkdir -p /var/lib/calico
-        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
-        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
-        ExecStartPre=/usr/bin/docker run -d \
-          --name kubelet \
-          --privileged \
-          --pid host \
-          --network host \
-          -v /etc/kubernetes:/etc/kubernetes:ro \
-          -v /etc/machine-id:/etc/machine-id:ro \
-          -v /usr/lib/os-release:/etc/os-release:ro \
-          -v /lib/modules:/lib/modules:ro \
-          -v /run:/run \
-          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
-          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
-          -v /var/lib/calico:/var/lib/calico:ro \
-          -v /var/lib/docker:/var/lib/docker \
-          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
-          -v /var/log:/var/log \
-          -v /opt/cni/bin:/opt/cni/bin \
-          $${KUBELET_IMAGE} \
-          --anonymous-auth=false \
-          --authentication-token-webhook \
-          --authorization-mode=Webhook \
-          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
-          --client-ca-file=/etc/kubernetes/ca.crt \
-          --cluster_dns=${cluster_dns_service_ip} \
-          --cluster_domain=${cluster_domain_suffix} \
-          --cni-conf-dir=/etc/kubernetes/cni/net.d \
-          --healthz-port=0 \
-          --kubeconfig=/var/lib/kubelet/kubeconfig \
-          --network-plugin=cni \
-          --node-labels=node.kubernetes.io/controller="true" \
-          --pod-manifest-path=/etc/kubernetes/manifests \
-          --register-with-taints=node-role.kubernetes.io/controller=:NoSchedule \
-          --read-only-port=0 \
-          --rotate-certificates \
-          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
-        ExecStart=docker logs -f kubelet
-        ExecStop=docker stop kubelet
-        ExecStopPost=docker rm kubelet
-        Restart=always
-        RestartSec=10
-        [Install]
-        WantedBy=multi-user.target
-    - name: bootstrap.service
-      contents: |
-        [Unit]
-        Description=Kubernetes control plane
-        ConditionPathExists=!/opt/bootstrap/bootstrap.done
-        [Service]
-        Type=oneshot
-        RemainAfterExit=true
-        WorkingDirectory=/opt/bootstrap
-        ExecStart=/usr/bin/rkt run \
-          --trust-keys-from-https \
-          --volume config,kind=host,source=/etc/kubernetes/bootstrap-secrets \
-          --mount volume=config,target=/etc/kubernetes/secrets \
-          --volume assets,kind=host,source=/opt/bootstrap/assets \
-          --mount volume=assets,target=/assets \
-          --volume script,kind=host,source=/opt/bootstrap/apply \
-          --mount volume=script,target=/apply \
-          --insecure-options=image \
-          docker://quay.io/poseidon/kubelet:v1.19.3 \
-          --net=host \
-          --dns=host \
-          --exec=/apply
-        ExecStartPost=/bin/touch /opt/bootstrap/bootstrap.done
-        [Install]
-        WantedBy=multi-user.target
-storage:
-  directories:
-    - path: /var/lib/etcd
-      filesystem: root
-      mode: 0700
-      overwrite: true
-  files:
-    - path: /etc/kubernetes/kubeconfig
-      filesystem: root
-      mode: 0644
-      contents:
-        inline: |
-          ${kubeconfig}
-    - path: /opt/bootstrap/layout
-      filesystem: root
-      mode: 0544
-      contents:
-        inline: |
-          #!/bin/bash -e
-          mkdir -p -- auth tls/etcd tls/k8s static-manifests manifests/coredns manifests-networking
-          awk '/#####/ {filename=$2; next} {print > filename}' assets
-          mkdir -p /etc/ssl/etcd/etcd
-          mkdir -p /etc/kubernetes/bootstrap-secrets
-          mv tls/etcd/{peer*,server*} /etc/ssl/etcd/etcd/
-          mv tls/etcd/etcd-client* /etc/kubernetes/bootstrap-secrets/
-          chown -R etcd:etcd /etc/ssl/etcd
-          chmod -R 500 /etc/ssl/etcd
-          chmod -R 700 /var/lib/etcd
-          mv auth/kubeconfig /etc/kubernetes/bootstrap-secrets/
-          mv tls/k8s/* /etc/kubernetes/bootstrap-secrets/
-          mkdir -p /etc/kubernetes/manifests
-          mv static-manifests/* /etc/kubernetes/manifests/
-          mkdir -p /opt/bootstrap/assets
-          mv manifests /opt/bootstrap/assets/manifests
-          mv manifests-networking/* /opt/bootstrap/assets/manifests/
-          rm -rf assets auth static-manifests tls manifests-networking
-    - path: /opt/bootstrap/apply
-      filesystem: root
-      mode: 0544
-      contents:
-        inline: |
-          #!/bin/bash -e
-          export KUBECONFIG=/etc/kubernetes/secrets/kubeconfig
-          until kubectl version; do
-            echo "Waiting for static pod control plane"
-            sleep 5
-          done
-          until kubectl apply -f /assets/manifests -R; do
-            echo "Retry applying manifests"
-            sleep 5
-          done
-    - path: /etc/sysctl.d/max-user-watches.conf
-      filesystem: root
-      mode: 0644
-      contents:
-        inline: |
-          fs.inotify.max_user_watches=16184
-passwd:
-  users:
-    - name: core
-      ssh_authorized_keys:
-        - "${ssh_authorized_key}"
diff --git a/workers/cl/worker.yaml b/workers/cl/worker.yaml
deleted file mode 100644
index 150122d..0000000
--- a/workers/cl/worker.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-systemd:
-  units:
-    - name: docker.service
-      enabled: true
-    - name: locksmithd.service
-      mask: true
-    - name: wait-for-dns.service
-      enabled: true
-      contents: |
-        [Unit]
-        Description=Wait for DNS entries
-        Wants=systemd-resolved.service
-        Before=kubelet.service
-        [Service]
-        Type=oneshot
-        RemainAfterExit=true
-        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
-        [Install]
-        RequiredBy=kubelet.service
-    - name: kubelet.service
-      enabled: true
-      contents: |
-        [Unit]
-        Description=Kubelet
-        Requires=docker.service
-        After=docker.service
-        Wants=rpc-statd.service
-        [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
-        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
-        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
-        ExecStartPre=/bin/mkdir -p /opt/cni/bin
-        ExecStartPre=/bin/mkdir -p /var/lib/calico
-        ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
-        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
-        # Podman, rkt, or runc run container processes, whereas docker run
-        # is a client to a daemon and requires workarounds to use within a
-        # systemd unit. https://github.com/moby/moby/issues/6791
-        ExecStartPre=/usr/bin/docker run -d \
-          --name kubelet \
-          --privileged \
-          --pid host \
-          --network host \
-          -v /etc/kubernetes:/etc/kubernetes:ro \
-          -v /etc/machine-id:/etc/machine-id:ro \
-          -v /usr/lib/os-release:/etc/os-release:ro \
-          -v /lib/modules:/lib/modules:ro \
-          -v /run:/run \
-          -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
-          -v /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd \
-          -v /var/lib/calico:/var/lib/calico:ro \
-          -v /var/lib/docker:/var/lib/docker \
-          -v /var/lib/kubelet:/var/lib/kubelet:rshared \
-          -v /var/log:/var/log \
-          -v /opt/cni/bin:/opt/cni/bin \
-          $${KUBELET_IMAGE} \
-          --anonymous-auth=false \
-          --authentication-token-webhook \
-          --authorization-mode=Webhook \
-          --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig \
-          --client-ca-file=/etc/kubernetes/ca.crt \
-          --cluster_dns=${cluster_dns_service_ip} \
-          --cluster_domain=${cluster_domain_suffix} \
-          --cni-conf-dir=/etc/kubernetes/cni/net.d \
-          --healthz-port=0 \
-          --kubeconfig=/var/lib/kubelet/kubeconfig \
-          --network-plugin=cni \
-          --node-labels=node.kubernetes.io/node \
-          %{~ for label in split(",", node_labels) ~}
-          --node-labels=${label} \
-          %{~ endfor ~}
-          --pod-manifest-path=/etc/kubernetes/manifests \
-          --read-only-port=0 \
-          --rotate-certificates \
-          --volume-plugin-dir=/var/lib/kubelet/volumeplugins
-        ExecStart=docker logs -f kubelet
-        ExecStop=docker stop kubelet
-        ExecStopPost=docker rm kubelet
-        Restart=always
-        RestartSec=5
-        [Install]
-        WantedBy=multi-user.target
-    - name: delete-node.service
-      enabled: true
-      contents: |
-        [Unit]
-        Description=Delete Kubernetes node on shutdown
-        [Service]
-        Environment=KUBELET_IMAGE=quay.io/poseidon/kubelet:v1.19.3
-        Type=oneshot
-        RemainAfterExit=true
-        ExecStart=/bin/true
-        ExecStop=/bin/bash -c '/usr/bin/docker run -v /var/lib/kubelet:/var/lib/kubelet:ro --entrypoint /usr/local/bin/kubectl $${KUBELET_IMAGE} --kubeconfig=/var/lib/kubelet/kubeconfig delete node $HOSTNAME'
-        [Install]
-        WantedBy=multi-user.target
-storage:
-  files:
-    - path: /etc/kubernetes/kubeconfig
-      filesystem: root
-      mode: 0644
-      contents:
-        inline: |
-          ${kubeconfig}
-    - path: /etc/sysctl.d/max-user-watches.conf
-      filesystem: root
-      mode: 0644
-      contents:
-        inline: |
-          fs.inotify.max_user_watches=16184
-passwd:
-  users:
-    - name: core
-      ssh_authorized_keys:
-        - "${ssh_authorized_key}"
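Example: the replacement configs (added separately, not shown in this
diff) are Butane Configs using the flatcar variant, which poseidon/ct
renders to Ignition v3.3.0. A minimal sketch, assuming variant version
1.0.0; the file entry is illustrative rather than the full controller
or worker config. Note that Ignition v3.x drops the per-file
filesystem: root field used above:

    ---
    variant: flatcar
    version: 1.0.0
    storage:
      files:
        - path: /etc/kubernetes/kubeconfig
          mode: 0644
          contents:
            inline: |
              ${kubeconfig}
    passwd:
      users:
        - name: core
          ssh_authorized_keys:
            - "${ssh_authorized_key}"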
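On the Terraform side, modules must require poseidon/ct v0.11.0 or
newer (e.g. version = "~> 0.11") so the ct_config data source accepts
the flatcar Butane variant; its rendered output is then Ignition
v3.3.0 JSON suitable for machine user-data, and setting strict = true
makes rendering fail on Butane warnings rather than masking them.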