diff --git a/.github/typos.toml b/.github/typos.toml index 600a39baf..7ebdacef4 100644 --- a/.github/typos.toml +++ b/.github/typos.toml @@ -6,6 +6,7 @@ HashiCorp = "HashiCorp" mavrickrishi = "mavrickrishi" # Username mavrick = "mavrick" # Username inh = "inh" # Option in setpriv command +exportfs = "exportfs" # nfs related binary [files] extend-exclude = ["registry/coder/templates/aws-devcontainer/architecture.svg"] #False positive \ No newline at end of file diff --git a/registry/ericpaulsen/templates/nfs-deployment/README.md b/registry/ericpaulsen/templates/nfs-deployment/README.md new file mode 100644 index 000000000..c2bdbdc6e --- /dev/null +++ b/registry/ericpaulsen/templates/nfs-deployment/README.md @@ -0,0 +1,70 @@ +--- +display_name: "NFS K8s Deployment" +description: "Mount an NFS share to a Coder K8s workspace" +icon: "../../../../.icons/folder.svg" +verified: false +tags: ["kubernetes", "shared-dir", "nfs"] +--- + +# NFS K8s Deployment + +This template provisions a Coder workspace as a Kubernetes Deployment, with an NFS share mounted +as a volume. The NFS share will synchronize the server-side files onto the client (Coder workspace). +When you stop the Coder workspace and rebuild, the NFS share will be re-mounted, and the changes persisted. + +Note the `volume` and `volume_mount` blocks in the deployment and container spec, +respectively: + +```terraform +resource "kubernetes_deployment" "main" { + spec { + template { + spec { + container { + volume_mount { + mount_path = data.coder_parameter.nfs_mount_path.value # mount path in the container + name = "nfs-share" + } + } + volume { + name = "nfs-share" + nfs { + path = data.coder_parameter.nfs_mount_path.value # path to be exported from the server + server = data.coder_parameter.nfs_server.value # server IP address + } + } + } + } + } +} +``` + +## Server-side configuration + +1. 
Create an NFS mount on the server for the clients to access: + + ```console + export NFS_MNT_PATH=/mnt/nfs_share + # Create directory to share + sudo mkdir -p $NFS_MNT_PATH + # Assign UID & GIDs access + sudo chown -R uid:gid $NFS_MNT_PATH + sudo chmod 777 $NFS_MNT_PATH + ``` + +1. Grant access to the client by updating the `/etc/exports` file, which + controls the directories shared with remote clients. See + [Red Hat's docs for more information about the configuration options](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/deployment_guide/s1-nfs-server-config-exports). + + ```console + # Provides read/write access to clients accessing the NFS from any IP address. + /mnt/nfs_share *(rw,sync,no_subtree_check) + ``` + +1. Export the NFS file share directory. You must do this every time you change + `/etc/exports`. + + ```console + sudo exportfs -a + sudo systemctl restart nfs-server + ``` diff --git a/registry/ericpaulsen/templates/nfs-deployment/main.tf b/registry/ericpaulsen/templates/nfs-deployment/main.tf new file mode 100644 index 000000000..e8c395e60 --- /dev/null +++ b/registry/ericpaulsen/templates/nfs-deployment/main.tf @@ -0,0 +1,348 @@ +terraform { + required_providers { + coder = { + source = "coder/coder" + } + kubernetes = { + source = "hashicorp/kubernetes" + } + } +} + +provider "coder" { +} + +provider "kubernetes" { + config_path = var.use_kubeconfig == true ? "~/.kube/config" : null +} + +variable "use_kubeconfig" { + type = bool + description = <<-EOF + Use host kubeconfig? (true/false) + + Set this to false if the Coder host is itself running as a Pod on the same + Kubernetes cluster as you are deploying workspaces to. + + Set this to true if the Coder host is running outside the Kubernetes cluster + for workspaces. A valid "~/.kube/config" must be present on the Coder host. 
+ EOF + default = false +} + +variable "namespace" { + type = string + description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces). If the Coder host is itself running as a Pod on the same Kubernetes cluster as you are deploying workspaces to, set this to the same namespace." +} + +data "coder_workspace" "me" {} +data "coder_workspace_owner" "me" {} + +data "coder_parameter" "cpu" { + name = "cpu" + display_name = "CPU" + description = "The number of CPU cores" + default = "2" + icon = "/icon/memory.svg" + mutable = true + option { + name = "2 Cores" + value = "2" + } + option { + name = "4 Cores" + value = "4" + } + option { + name = "6 Cores" + value = "6" + } + option { + name = "8 Cores" + value = "8" + } +} + +data "coder_parameter" "memory" { + name = "memory" + display_name = "Memory" + description = "The amount of memory in GB" + default = "2" + icon = "/icon/memory.svg" + mutable = true + option { + name = "2 GB" + value = "2" + } + option { + name = "4 GB" + value = "4" + } + option { + name = "6 GB" + value = "6" + } + option { + name = "8 GB" + value = "8" + } +} + +data "coder_parameter" "home_disk_size" { + name = "home_disk_size" + display_name = "Home disk size" + description = "The size of the home disk in GB" + default = "10" + type = "number" + icon = "/emojis/1f4be.png" + mutable = false + validation { + min = 1 + max = 99999 + } +} + +data "coder_parameter" "nfs_server" { + name = "nfs_server" + type = "string" + display_name = "NFS Server IP" + description = "The NFS server IP address to use for the workspace" +} + +data "coder_parameter" "nfs_mount_path" { + name = "nfs_mount_path" + type = "string" + display_name = "NFS Mount Path" + description = "The path in your workspace container to mount the NFS share to" + default = "/mnt/nfs-share" + validation { + regex = "^/[a-zA-Z0-9_-]+(/[a-zA-Z0-9_-]+)*$" + error = "NFS mount path must be a valid path in your workspace container" + } +} + +resource 
"coder_agent" "coder" { + os = "linux" + arch = "amd64" + + # The following metadata blocks are optional. They are used to display + # information about your workspace in the dashboard. You can remove them + # if you don't want to display any information. + # For basic resources, you can use the `coder stat` command. + # If you need more control, you can write your own script. + metadata { + display_name = "CPU Usage" + key = "0_cpu_usage" + script = "coder stat cpu" + interval = 10 + timeout = 1 + } + + metadata { + display_name = "RAM Usage" + key = "1_ram_usage" + script = "coder stat mem" + interval = 10 + timeout = 1 + } + + metadata { + display_name = "Home Disk" + key = "3_home_disk" + script = "coder stat disk --path $${HOME}" + interval = 60 + timeout = 1 + } + + metadata { + display_name = "CPU Usage (Host)" + key = "4_cpu_usage_host" + script = "coder stat cpu --host" + interval = 10 + timeout = 1 + } + + metadata { + display_name = "Memory Usage (Host)" + key = "5_mem_usage_host" + script = "coder stat mem --host" + interval = 10 + timeout = 1 + } + + metadata { + display_name = "Load Average (Host)" + key = "6_load_host" + # get load avg scaled by number of cores + script = <