Skip to content

Commit

Permalink
feat!: refactor taskfiles and update README
Browse files Browse the repository at this point in the history
Signed-off-by: Devin Buhl <devin@buhl.casa>
  • Loading branch information
onedr0p committed Jan 6, 2024
1 parent f9a7307 commit a7eadd3
Show file tree
Hide file tree
Showing 7 changed files with 184 additions and 166 deletions.
81 changes: 45 additions & 36 deletions .taskfiles/AnsibleTasks.yaml
Original file line number Diff line number Diff line change
@@ -1,54 +1,63 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

vars:
ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks"
ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory"
env:
PATH: "{{.ROOT_DIR}}/.venv/bin:$PATH"
VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv"
ANSIBLE_COLLECTIONS_PATH: "{{.ROOT_DIR}}/.venv/galaxy"
ANSIBLE_ROLES_PATH: "{{.ROOT_DIR}}/.venv/galaxy/ansible_roles"
ANSIBLE_VARS_ENABLED: "host_group_vars,community.sops.sops"

tasks:

prepare:
desc: Prepare all the k8s nodes for running k3s
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-prepare.yaml

install:
desc: Install Kubernetes on the nodes
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-installation.yaml

rollout-update:
desc: Preform operating system updates and rollout restart the cluster
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-rollout-update.yaml

nuke:
desc: Uninstall Kubernetes on the nodes
dir: "{{.ANSIBLE_DIR}}"
interactive: true
cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-nuke.yaml

reboot:
desc: Reboot all the k8s nodes
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-reboot.yaml
deps:
desc: Set up Ansible dependencies for the environment
cmds:
- task: .venv

run:
desc: Run an Ansible playbook for configuring a cluster
summary: |
Args:
playbook: Playbook to run (required)
prompt: Run Ansible playbook '{{.playbook}}'... continue?
deps: ["deps"]
cmd: |
.venv/bin/ansible-playbook \
--inventory {{.ANSIBLE_DIR}}/inventory/hosts.yaml \
{{.ANSIBLE_DIR}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}
preconditions:
- { msg: "Argument (playbook) is required", sh: "test -n {{.playbook}}" }
- { msg: "Venv not found", sh: "test -d {{.ROOT_DIR}}/.venv" }
- { msg: "Inventory not found", sh: "test -f {{.ANSIBLE_DIR}}/inventory/hosts.yaml" }
- { msg: "Playbook not found", sh: "test -f {{.ANSIBLE_DIR}}/playbooks/{{.playbook}}.yaml" }

poweroff:
desc: Shutdown all the k8s nodes
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible kubernetes -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml -a '/usr/bin/systemctl poweroff' --become
cmd: ansible kubernetes -i {{.ANSIBLE_DIR}}/inventory/hosts.yaml -a '/usr/bin/systemctl poweroff' --become

list:
desc: List all the hosts
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --list-hosts
cmd: ansible kubernetes -i {{.ANSIBLE_DIR}}/inventory/hosts.yaml --list-hosts

ping:
desc: Ping all the hosts
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -m 'ping'
cmd: ansible kubernetes -i {{.ANSIBLE_DIR}}/inventory/hosts.yaml --one-line -m 'ping'

uptime:
desc: Uptime of all the hosts
dir: "{{.ANSIBLE_DIR}}"
cmd: ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -a 'uptime'
cmd: ansible kubernetes -i {{.ANSIBLE_DIR}}/inventory/hosts.yaml --one-line -a 'uptime'

.venv:
internal: true
cmds:
- true && {{.PYTHON_BIN}} -m venv {{.ROOT_DIR}}/.venv
- .venv/bin/python3 -m pip install --upgrade pip setuptools wheel
- .venv/bin/python3 -m pip install --upgrade --requirement "{{.ROOT_DIR}}/requirements.txt"
- .venv/bin/ansible-galaxy install --role-file "{{.ROOT_DIR}}/requirements.yaml" --force
sources:
- "{{.ANSIBLE_DIR}}/requirements.txt"
- "{{.ANSIBLE_DIR}}/requirements.yaml"
generates:
- "{{.ROOT_DIR}}/.venv/pyvenv.cfg"
1 change: 1 addition & 0 deletions .taskfiles/BrewTasks.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

tasks:
Expand Down
80 changes: 0 additions & 80 deletions .taskfiles/ClusterTasks.yaml

This file was deleted.

57 changes: 57 additions & 0 deletions .taskfiles/FluxTasks.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

vars:
# renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator
PROMETHEUS_OPERATOR_VERSION: "v0.70.0"

tasks:

bootstrap:
desc: Bootstrap Flux into a Kubernetes cluster
cmds:
# Install essential Prometheus Operator CRDs
- kubectl apply --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
- kubectl apply --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
- kubectl apply --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
- kubectl apply --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
# Install Flux
- kubectl apply --server-side --kustomize {{.KUBERNETES_DIR}}/bootstrap
# Install Secrets and Kustomizations
- cat {{.ROOT_DIR}}/age.key | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin
- sops --decrypt {{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml | kubectl apply --server-side --filename -
- sops --decrypt {{.KUBERNETES_DIR}}/flux/vars/cluster-secrets-user.sops.yaml | kubectl apply --server-side --filename -
- kubectl apply --server-side --filename {{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml
- kubectl apply --server-side --filename {{.KUBERNETES_DIR}}/flux/vars/cluster-settings-user.yaml
- kubectl apply --server-side --kustomize {{.KUBERNETES_DIR}}/flux/config
preconditions:
- { msg: "Flux already appears installed", sh: "exit $(( ! $(kubectl get namespace flux-system) ))" }
- { msg: "Age private key not found", sh: "test -f {{.ROOT_DIR}}/age.key" }

apply:
desc: Apply a Flux Kustomization resource for a cluster
summary: |
Args:
path: Path containing the Flux Kustomization resource (ks.yaml) (required)
ns: Namespace the Flux Kustomization exists in (default: flux-system)
cmd: |
flux build ks $(basename {{.path}}) \
--namespace {{.ns}} \
--kustomization-file {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml \
--path {{.KUBERNETES_DIR}}/apps/{{.path}} \
{{- if contains "not found" .ks }}--dry-run \{{ end }}
| \
kubectl apply --server-side \
--field-manager=kustomize-controller -f -
vars:
path: '{{ or .path (fail "Argument (path) is required") }}'
ns: '{{.ns | default "flux-system"}}'
ks:
sh: flux --namespace {{.ns}} get kustomizations $(basename {{.path}}) 2>&1
preconditions:
- { msg: "Kustomization file not found", sh: "test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml" }

reconcile:
desc: Force update Flux to pull in changes from your Git repository
cmd: flux reconcile --namespace flux-system kustomization cluster --with-source
67 changes: 67 additions & 0 deletions .taskfiles/KubernetesTasks.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
---
# yaml-language-server: $schema=https://taskfile.dev/schema.json
version: "3"

tasks:

mount:
desc: Mount a PersistentVolumeClaim to a temporary pod for a cluster
summary: |
Args:
ns: Namespace the PVC is in (default: default)
claim: PVC to mount (required)
interactive: true
cmd: |
kubectl run -n {{.ns}} debug-{{.claim}} -i --tty --rm --image=null --privileged --overrides='
{
"apiVersion": "v1",
"spec": {
"containers": [
{
"name": "debug",
"image": "ghcr.io/onedr0p/alpine:rolling",
"command": ["/bin/bash"],
"stdin": true,
"stdinOnce": true,
"tty": true,
"volumeMounts": [
{
"name": "config",
"mountPath": "/config"
}
]
}
],
"volumes": [
{
"name": "config",
"persistentVolumeClaim": {
"claimName": "{{.claim}}"
}
}
],
"restartPolicy": "Never"
}
}'
vars:
ns: '{{.ns | default "default"}}'
claim: '{{ or .claim (fail "Argument (claim) is required") }}'
preconditions:
- { msg: "PVC not found", sh: "kubectl -n {{.ns}} get persistentvolumeclaim {{.claim}}" }

resources:
desc: Gather common resources in your cluster, useful when asking for support
cmds:
- for: { var: RESOURCES }
cmd: kubectl get {{.ITEM}} {{.CLI_ARGS | default "-A"}}
vars:
RESOURCES: >-
nodes
gitrepositories
kustomizations
helmrepositories
helmreleases
certificates
certificaterequests
ingresses
pods
23 changes: 9 additions & 14 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -142,19 +142,17 @@ Once you have installed Debian on your nodes, there are six stages to getting a

### 🌱 Stage 2: Setup your local workstation environment

1. Install the most recent version of [task](https://taskfile.dev/), see the task [installation docs](https://taskfile.dev/installation/) for other supported platforms.
1. Install the most recent version of [task](https://taskfile.dev/), see the [installation docs](https://taskfile.dev/installation/) for other supported platforms.

```sh
# Brew
brew install go-task
```

2. Install the most recent version of [direnv](https://direnv.net/), see the direnv [installation docs](https://direnv.net/docs/installation.html) for other supported platforms.
2. Install the most recent version of [direnv](https://direnv.net/), see the [installation docs](https://direnv.net/docs/installation.html) for other supported platforms.

📍 _After installing `direnv` be sure to **[hook it into your shell](https://direnv.net/docs/hook.html)** and after that is done run `direnv allow` while in your repos' directory._

```sh
# Brew
brew install direnv
```

Expand All @@ -163,16 +161,14 @@ Once you have installed Debian on your nodes, there are six stages to getting a
📍 _This command requires Python 3.10+ to be installed_

```sh
# Platform agnostic
task deps
task ansible:deps
```

4. Install the required tools: [age](https://github.com/FiloSottile/age), [flux](https://toolkit.fluxcd.io/), [cloudflared](https://github.com/cloudflare/cloudflared), [kubectl](https://kubernetes.io/docs/tasks/tools/), [sops](https://github.com/getsops/sops)

📍 _Not using brew? Make sure to look up how to install the latest version of each of these CLI tools yourself._

```sh
# Brew
task brew:deps
```

Expand Down Expand Up @@ -283,12 +279,12 @@ Once you have installed Debian on your nodes, there are six stages to getting a
4. Run the Ansible prepare playbook (nodes will reboot when done)

```sh
task ansible:prepare
task ansible:run playbook=cluster-prepare
```

### ⛵ Stage 5: Use Ansible to install k3s

📍 _Here we will be running a Ansible Playbook to install [k3s](https://k3s.io/) with [this](https://galaxy.ansible.com/xanmanning/k3s) Ansible galaxy role. If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over from this point._
📍 _Here we will be running an Ansible Playbook to install [k3s](https://k3s.io/) with [this](https://galaxy.ansible.com/xanmanning/k3s) Ansible galaxy role. If you run into problems, you can run `task ansible:run playbook=cluster-nuke` to destroy the k3s cluster and start over from this point._

1. Verify Ansible can view your config

Expand All @@ -305,7 +301,7 @@ Once you have installed Debian on your nodes, there are six stages to getting a
3. Install k3s with Ansible

```sh
task ansible:install
task ansible:run playbook=cluster-installation
```

4. Verify the nodes are online
Expand Down Expand Up @@ -348,7 +344,7 @@ Once you have installed Debian on your nodes, there are six stages to getting a
3. Install Flux and sync the cluster to the Git repository

```sh
task cluster:install
task flux:bootstrap
# namespace/flux-system configured
# customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created
# ...
Expand All @@ -371,11 +367,10 @@ _Mic check, 1, 2_ - In a few moments applications should be lighting up like Chr

1. Output all the common resources in your cluster.

📍 _Feel free to use the provided [cluster tasks](.taskfiles/ClusterTasks.yaml) for validation of cluster resources or continue to get familiar with the `kubectl` and `flux` CLI tools._

📍 _Feel free to use the provided [kubernetes tasks](.taskfiles/KubernetesTasks.yaml) for validation of cluster resources or continue to get familiar with the `kubectl` and `flux` CLI tools._

```sh
task cluster:resources
task kubernetes:resources
```

2. ⚠️ It might take `cert-manager` awhile to generate certificates, this is normal so be patient.
Expand Down

0 comments on commit a7eadd3

Please sign in to comment.