From 58ac1b349bc516ada30be8fa129523b119c32c4e Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 16:40:01 +0200 Subject: [PATCH 01/13] docs(ins): add warning for kapsule nodes --- menu/navigation.json | 8 ++++ .../instances/api-cli/migrating-instances.mdx | 4 ++ pages/kubernetes/how-to/create-node-pool.mdx | 36 ++++++++++++++ pages/kubernetes/how-to/migrate-node-pool.mdx | 47 +++++++++++++++++++ 4 files changed, 95 insertions(+) create mode 100644 pages/kubernetes/how-to/create-node-pool.mdx create mode 100644 pages/kubernetes/how-to/migrate-node-pool.mdx diff --git a/menu/navigation.json b/menu/navigation.json index a94bcaeb87..3f16a535a3 100644 --- a/menu/navigation.json +++ b/menu/navigation.json @@ -1875,6 +1875,14 @@ "label": "Connect to a cluster with kubectl", "slug": "connect-cluster-kubectl" }, + { + "label": "Create a new node Kapsule node pool", + "slug": "create-node-pool" + }, + { + "label": "Migrate a Kapsule node pool", + "slug": "migrate-node-pool" + }, { "label": "Deploy an image from Container Registry", "slug": "deploy-image-from-container-registry" diff --git a/pages/instances/api-cli/migrating-instances.mdx b/pages/instances/api-cli/migrating-instances.mdx index 4777445d45..390ff6ab66 100644 --- a/pages/instances/api-cli/migrating-instances.mdx +++ b/pages/instances/api-cli/migrating-instances.mdx @@ -26,6 +26,10 @@ To do so, you need the Instance’s ID and a valid API key. Network interface names may vary across commercial families (e.g. ENT1 vs. POP2). Ensure that any hardcoded interface names in your configurations or scripts are updated to avoid migration issues. + + Do **not** manually change the commercial type of **Kubernetes Kapsule nodes** using the API or CLI. Kubernetes Kapsule nodes **must be managed** through Kubernetes. Modifying node types outside of the recommended method can lead to instability or unexpected behavior. 
+ To change the commercial type of your nodes, create a new node pool with the desired Instance type and [migrate your workloads](/kubernets/how-to/migrate-node-pool/) to the new pool. + diff --git a/pages/kubernetes/how-to/create-node-pool.mdx b/pages/kubernetes/how-to/create-node-pool.mdx new file mode 100644 index 0000000000..ed344cfaa4 --- /dev/null +++ b/pages/kubernetes/how-to/create-node-pool.mdx @@ -0,0 +1,36 @@ +--- +meta: + title: Create a new Kubernetes Kapsule node pool + description: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. +content: + h1: Create a new Kubernetes Kapsule node pool + paragraph: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. +tags: kubernetes kapsule kosmos +dates: + validation: 2025-06-23 + posted: 2025-06-23 +categories: + - containers +--- + +This documentation provides step-by-step instructions on how to create a new node pool for an existing Kubernetes Kapsule cluster. + + + +- A Scaleway account logged into the [console](https://console.scaleway.com) +- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization +- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) + +1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. +2. Click the Kapsule cluster name you want to manage. The cluster information page displays. +3. Click the **Pools** tab to display the pool configuration of the cluster. +4. Click **Add pool** to launch the pool creation wizard. +5. Configure the pool: + - Choose the **Availability Zone** for the pool. + - Choose the commercial type of Instance for the pool. + - Configure the system volume. + - Configure pool options. + - Enter the pool's details. +6. Click **Add pool**. The pool gets added to your basket. 
Repeat the steps above to configure addional pools. +7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. +8. Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. \ No newline at end of file diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx new file mode 100644 index 0000000000..e563ae053c --- /dev/null +++ b/pages/kubernetes/how-to/migrate-node-pool.mdx @@ -0,0 +1,47 @@ +--- +meta: + title: Migrating Kubetnetes workloads to a new node pool + description: Learn how to migrate existing Kubernetes workloads to a new node pool. +content: + h1: Migrating Kubetnetes workloads to a new node pools + paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. +tags: kubernetes kapsule kosmos +dates: + validation: 2025-06-23 + posted: 2025-06-23 +categories: + - containers +--- + +This documentation provides step-by-step instructions on how to migrate Kubernetes workloads from one node pool to another within a Kubernetes Kapsule cluster. +Migrating workloads can be required to change the commercial type of Instance for your pool, or to scale your infrastructure. + + + +- A Scaleway account logged into the [console](https://console.scaleway.com) +- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization +- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) +- Have an existing node pool that you want to migrate + + + Always ensure that your **data is backed up** before performing any operations that could affect it. + + +1. Create the new node pool with the desired configuration either [from the console](/kubernetes/how-to/create-node-pool/) or by using `kubectl`. + + Ensure that the new node pool is properly labeled if necessary. + + +2. 
Run `kubectl get nodes` to check that the new nodes are in a `Ready` state. + +3. Cordon the nodes in the old node pool to prevent new pods from being scheduled there. For each node, run: `kubectl cordon ` + +4. Drain the nodes to evict the pods gracefully. + - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data` + - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them. + - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. + - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for furher information. + +5. After draining, verify that the pods have been rescheduled to the new node pool. Run `kubectl get pods -o wide` after daraining, to verify that the pods have been rescheduled to the new node pool + +6. Delete the old node pool once you confirm that all workloads are running smoothly on the new node pool, From 99dc6b3d3b340c2a1c79260e797825a89481ead7 Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 16:49:07 +0200 Subject: [PATCH 02/13] Apply suggestions from code review Co-authored-by: Jessica <113192637+jcirinosclwy@users.noreply.github.com> --- pages/kubernetes/how-to/create-node-pool.mdx | 2 +- pages/kubernetes/how-to/migrate-node-pool.mdx | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pages/kubernetes/how-to/create-node-pool.mdx b/pages/kubernetes/how-to/create-node-pool.mdx index ed344cfaa4..93009b3c38 100644 --- a/pages/kubernetes/how-to/create-node-pool.mdx +++ b/pages/kubernetes/how-to/create-node-pool.mdx @@ -31,6 +31,6 @@ This documentation provides step-by-step instructions on how to create a new nod - Configure the system volume. - Configure pool options. - Enter the pool's details. -6. Click **Add pool**. 
The pool gets added to your basket. Repeat the steps above to configure addional pools. +6. Click **Add pool**. The pool gets added to your basket. Repeat the steps above to configure additional pools. 7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. 8. Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. \ No newline at end of file diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx index e563ae053c..814ce9cbc2 100644 --- a/pages/kubernetes/how-to/migrate-node-pool.mdx +++ b/pages/kubernetes/how-to/migrate-node-pool.mdx @@ -1,9 +1,9 @@ --- meta: - title: Migrating Kubetnetes workloads to a new node pool + title: Migrating Kubernetes workloads to a new node pool description: Learn how to migrate existing Kubernetes workloads to a new node pool. content: - h1: Migrating Kubetnetes workloads to a new node pools + h1: Migrating Kubernetes workloads to a new node pools paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. tags: kubernetes kapsule kosmos dates: @@ -44,4 +44,4 @@ Migrating workloads can be required to change the commercial type of Instance fo 5. After draining, verify that the pods have been rescheduled to the new node pool. Run `kubectl get pods -o wide` after daraining, to verify that the pods have been rescheduled to the new node pool -6. Delete the old node pool once you confirm that all workloads are running smoothly on the new node pool, +6. Delete the old node pool once you confirm that all workloads are running smoothly on the new node pool. 
From b1ac7d8d3c41d4e53b7bfe683a6243f61e5cd7d1 Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 16:48:34 +0200 Subject: [PATCH 03/13] feat(k8s): add delete pool --- menu/navigation.json | 4 +++ pages/kubernetes/how-to/create-node-pool.mdx | 4 +-- pages/kubernetes/how-to/delete-node-pool.mdx | 31 +++++++++++++++++++ pages/kubernetes/how-to/migrate-node-pool.mdx | 6 ++-- 4 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 pages/kubernetes/how-to/delete-node-pool.mdx diff --git a/menu/navigation.json b/menu/navigation.json index 3f16a535a3..7cfa159922 100644 --- a/menu/navigation.json +++ b/menu/navigation.json @@ -1883,6 +1883,10 @@ "label": "Migrate a Kapsule node pool", "slug": "migrate-node-pool" }, + { + "label": "Delete a Kapsule node pool", + "slug": "delete-node-pool" + }, { "label": "Deploy an image from Container Registry", "slug": "deploy-image-from-container-registry" diff --git a/pages/kubernetes/how-to/create-node-pool.mdx b/pages/kubernetes/how-to/create-node-pool.mdx index 93009b3c38..f36162b669 100644 --- a/pages/kubernetes/how-to/create-node-pool.mdx +++ b/pages/kubernetes/how-to/create-node-pool.mdx @@ -1,9 +1,9 @@ --- meta: - title: Create a new Kubernetes Kapsule node pool + title: How to create a new Kubernetes Kapsule node pool description: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. content: - h1: Create a new Kubernetes Kapsule node pool + h1: How to create a new Kubernetes Kapsule node pool paragraph: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. 
tags: kubernetes kapsule kosmos dates: diff --git a/pages/kubernetes/how-to/delete-node-pool.mdx b/pages/kubernetes/how-to/delete-node-pool.mdx new file mode 100644 index 0000000000..709bc0e671 --- /dev/null +++ b/pages/kubernetes/how-to/delete-node-pool.mdx @@ -0,0 +1,31 @@ +--- +meta: + title: How to delete a Kubernetes Kapsule node pool + description: Learn how to delete a node pool from an existing Kubernetes Kapsule cluster. +content: + h1: How to delete a a new Kubernetes Kapsule node pool + paragraph: Learn how to delete node pool from an existing Kubernetes Kapsule cluster. +tags: kubernetes kapsule kosmos +dates: + validation: 2025-06-23 + posted: 2025-06-23 +categories: + - containers +--- + +This documentation provides step-by-step instructions on how to create a new node pool for an existing Kubernetes Kapsule cluster. + + + +- A Scaleway account logged into the [console](https://console.scaleway.com) +- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization +- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) + +1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. +2. Click the Kapsule cluster name you want to manage. The cluster information page displays. +3. Click the **Pools** tab to display the pool configuration of the cluster. +4. Click > **Delete** next to the node pool you want to delete. +5. Click **Delete pool** in the pop-up to confirm deletion of the pool. + + This action will permanently destroy your pool and all its data. 
+ \ No newline at end of file diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx index 814ce9cbc2..f812e09269 100644 --- a/pages/kubernetes/how-to/migrate-node-pool.mdx +++ b/pages/kubernetes/how-to/migrate-node-pool.mdx @@ -1,9 +1,9 @@ --- meta: - title: Migrating Kubernetes workloads to a new node pool + title: How to migrate Kubetnetes workloads to a new node pool description: Learn how to migrate existing Kubernetes workloads to a new node pool. content: - h1: Migrating Kubernetes workloads to a new node pools + h1: How to migrate Kubetnetes workloads to a new node pools paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. tags: kubernetes kapsule kosmos dates: @@ -44,4 +44,4 @@ Migrating workloads can be required to change the commercial type of Instance fo 5. After draining, verify that the pods have been rescheduled to the new node pool. Run `kubectl get pods -o wide` after daraining, to verify that the pods have been rescheduled to the new node pool -6. Delete the old node pool once you confirm that all workloads are running smoothly on the new node pool. +6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. 
From c824ef42747430644c500bfa59822c749d05ea0e Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 16:54:18 +0200 Subject: [PATCH 04/13] Apply suggestions from code review Co-authored-by: Jessica <113192637+jcirinosclwy@users.noreply.github.com> --- pages/kubernetes/how-to/migrate-node-pool.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx index f812e09269..fd10892e26 100644 --- a/pages/kubernetes/how-to/migrate-node-pool.mdx +++ b/pages/kubernetes/how-to/migrate-node-pool.mdx @@ -40,8 +40,8 @@ Migrating workloads can be required to change the commercial type of Instance fo - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data` - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them. - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. - - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for furher information. + - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for further information. -5. After draining, verify that the pods have been rescheduled to the new node pool. Run `kubectl get pods -o wide` after daraining, to verify that the pods have been rescheduled to the new node pool +5. Run `kubectl get pods -o wide` after draining, to verify that the pods have been rescheduled to the new node pool. 6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. 
From d58edaa442f15ab3effaa802557dc8262c3652c1 Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 16:57:10 +0200 Subject: [PATCH 05/13] feat(k8s): update doc --- pages/kubernetes/how-to/migrate-node-pool.mdx | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx index fd10892e26..8c66696447 100644 --- a/pages/kubernetes/how-to/migrate-node-pool.mdx +++ b/pages/kubernetes/how-to/migrate-node-pool.mdx @@ -31,17 +31,12 @@ Migrating workloads can be required to change the commercial type of Instance fo Ensure that the new node pool is properly labeled if necessary. - 2. Run `kubectl get nodes` to check that the new nodes are in a `Ready` state. - 3. Cordon the nodes in the old node pool to prevent new pods from being scheduled there. For each node, run: `kubectl cordon ` - 4. Drain the nodes to evict the pods gracefully. - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data` - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them. - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for further information. - 5. Run `kubectl get pods -o wide` after draining, to verify that the pods have been rescheduled to the new node pool. - 6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. 
From fc820d387f72f846eb5c659ab594c439904a576d Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 17:17:29 +0200 Subject: [PATCH 06/13] docs(k8s): update --- .../instances/api-cli/migrating-instances.mdx | 2 +- pages/kubernetes/how-to/create-node-pool.mdx | 36 -------- pages/kubernetes/how-to/delete-node-pool.mdx | 31 ------- pages/kubernetes/how-to/manage-node-pools.mdx | 85 +++++++++++++++++++ pages/kubernetes/how-to/migrate-node-pool.mdx | 42 --------- 5 files changed, 86 insertions(+), 110 deletions(-) delete mode 100644 pages/kubernetes/how-to/create-node-pool.mdx delete mode 100644 pages/kubernetes/how-to/delete-node-pool.mdx create mode 100644 pages/kubernetes/how-to/manage-node-pools.mdx delete mode 100644 pages/kubernetes/how-to/migrate-node-pool.mdx diff --git a/pages/instances/api-cli/migrating-instances.mdx b/pages/instances/api-cli/migrating-instances.mdx index 390ff6ab66..9a4f330b51 100644 --- a/pages/instances/api-cli/migrating-instances.mdx +++ b/pages/instances/api-cli/migrating-instances.mdx @@ -28,7 +28,7 @@ To do so, you need the Instance’s ID and a valid API key. Do **not** manually change the commercial type of **Kubernetes Kapsule nodes** using the API or CLI. Kubernetes Kapsule nodes **must be managed** through Kubernetes. Modifying node types outside of the recommended method can lead to instability or unexpected behavior. - To change the commercial type of your nodes, create a new node pool with the desired Instance type and [migrate your workloads](/kubernets/how-to/migrate-node-pool/) to the new pool. + To change the commercial type of your nodes, create a new node pool with the desired Instance type and [migrate your workloads](/kubernetes/how-to/manage-node-pools/) to the new pool. 
diff --git a/pages/kubernetes/how-to/create-node-pool.mdx b/pages/kubernetes/how-to/create-node-pool.mdx deleted file mode 100644 index f36162b669..0000000000 --- a/pages/kubernetes/how-to/create-node-pool.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -meta: - title: How to create a new Kubernetes Kapsule node pool - description: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. -content: - h1: How to create a new Kubernetes Kapsule node pool - paragraph: Learn how to add a new node pool to an existing Kubernetes Kapsule cluster. -tags: kubernetes kapsule kosmos -dates: - validation: 2025-06-23 - posted: 2025-06-23 -categories: - - containers ---- - -This documentation provides step-by-step instructions on how to create a new node pool for an existing Kubernetes Kapsule cluster. - - - -- A Scaleway account logged into the [console](https://console.scaleway.com) -- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) - -1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. -2. Click the Kapsule cluster name you want to manage. The cluster information page displays. -3. Click the **Pools** tab to display the pool configuration of the cluster. -4. Click **Add pool** to launch the pool creation wizard. -5. Configure the pool: - - Choose the **Availability Zone** for the pool. - - Choose the commercial type of Instance for the pool. - - Configure the system volume. - - Configure pool options. - - Enter the pool's details. -6. Click **Add pool**. The pool gets added to your basket. Repeat the steps above to configure additional pools. -7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. -8. 
Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. \ No newline at end of file diff --git a/pages/kubernetes/how-to/delete-node-pool.mdx b/pages/kubernetes/how-to/delete-node-pool.mdx deleted file mode 100644 index 709bc0e671..0000000000 --- a/pages/kubernetes/how-to/delete-node-pool.mdx +++ /dev/null @@ -1,31 +0,0 @@ ---- -meta: - title: How to delete a Kubernetes Kapsule node pool - description: Learn how to delete a node pool from an existing Kubernetes Kapsule cluster. -content: - h1: How to delete a a new Kubernetes Kapsule node pool - paragraph: Learn how to delete node pool from an existing Kubernetes Kapsule cluster. -tags: kubernetes kapsule kosmos -dates: - validation: 2025-06-23 - posted: 2025-06-23 -categories: - - containers ---- - -This documentation provides step-by-step instructions on how to create a new node pool for an existing Kubernetes Kapsule cluster. - - - -- A Scaleway account logged into the [console](https://console.scaleway.com) -- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) - -1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. -2. Click the Kapsule cluster name you want to manage. The cluster information page displays. -3. Click the **Pools** tab to display the pool configuration of the cluster. -4. Click > **Delete** next to the node pool you want to delete. -5. Click **Delete pool** in the pop-up to confirm deletion of the pool. - - This action will permanently destroy your pool and all its data. 
- \ No newline at end of file diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx new file mode 100644 index 0000000000..4d5447de5d --- /dev/null +++ b/pages/kubernetes/how-to/manage-node-pools.mdx @@ -0,0 +1,85 @@ +--- +meta: + title: How to manage Kubetnetes Kapsule node pools + description: Learn how to migrate existing Kubernetes workloads to a new node pool. +content: + h1: How to migrate Kubetnetes workloads to a new node pools + paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. +tags: kubernetes kapsule kosmos +dates: + validation: 2025-06-23 + posted: 2025-06-23 +categories: + - containers +--- + +This documentation provides step-by-step instructions on how to manage Kubernetes Kapsule node pools + + + +- A Scaleway account logged into the [console](https://console.scaleway.com) +- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization +- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) + +## How to create a new Kubernetes Kapsule node pool + + + Kubernetes Kapsule supports using both **fully isolated** and **controlled isolation** node pools within the same cluster. + + +1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. +2. Click the Kapsule cluster name you want to manage. The cluster information page displays. +3. Click the **Pools** tab to display the pool configuration of the cluster. +4. Click **Add pool** to launch the pool creation wizard. +5. Configure the pool: + - Choose the **Availability Zone** for the pool. + - Choose the commercial type of Instance for the pool. + - Configure the system volume. + - Configure pool options. + - Enter the pool's details. +6. Click **Add pool**. The pool gets added to your basket. 
Repeat the steps above to configure additional pools. +7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. +8. Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. + +## How to edit an existing Kubernetes Kapsule node pool + +1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. +2. Click the Kapsule cluster name you want to manage. The cluster information page displays. +3. Click the **Pools** tab to display the pool configuration of the cluster. +4. Click > **Delete** next to the node pool you want to edit. +5. Configure the pool: + - Update pool tags + - Configure autoscaling + - Enable or disable the [autoheal feature](/kubernetes/concepts/#autoheal) +6. Click **Update pool** to update the pool configuration. + +## How to migrate existing workloads to a new Kubernets Kapsule node pool + + + Always ensure that your **data is backed up** before performing any operations that could affect it. + + +1. Create the new node pool with the desired configuration either [from the console](/kubernetes/how-to/create-node-pool/) or by using `kubectl`. + + Ensure that the new node pool is properly labeled if necessary. + +2. Run `kubectl get nodes` to check that the new nodes are in a `Ready` state. +3. Cordon the nodes in the old node pool to prevent new pods from being scheduled there. For each node, run: `kubectl cordon ` +4. Drain the nodes to evict the pods gracefully. + - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data` + - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them. + - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. 
+ - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for further information. +5. Run `kubectl get pods -o wide` after draining, to verify that the pods have been rescheduled to the new node pool. +6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. + +## How to delete an existing Kubernetes Kapsule node pool + +1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. +2. Click the Kapsule cluster name you want to manage. The cluster information page displays. +3. Click the **Pools** tab to display the pool configuration of the cluster. +4. Click > **Delete** next to the node pool you want to delete. +5. Click **Delete pool** in the pop-up to confirm deletion of the pool. + + This action will permanently destroy your pool and all its data. + \ No newline at end of file diff --git a/pages/kubernetes/how-to/migrate-node-pool.mdx b/pages/kubernetes/how-to/migrate-node-pool.mdx deleted file mode 100644 index 8c66696447..0000000000 --- a/pages/kubernetes/how-to/migrate-node-pool.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -meta: - title: How to migrate Kubetnetes workloads to a new node pool - description: Learn how to migrate existing Kubernetes workloads to a new node pool. -content: - h1: How to migrate Kubetnetes workloads to a new node pools - paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. -tags: kubernetes kapsule kosmos -dates: - validation: 2025-06-23 - posted: 2025-06-23 -categories: - - containers ---- - -This documentation provides step-by-step instructions on how to migrate Kubernetes workloads from one node pool to another within a Kubernetes Kapsule cluster. 
-Migrating workloads can be required to change the commercial type of Instance for your pool, or to scale your infrastructure. - - - -- A Scaleway account logged into the [console](https://console.scaleway.com) -- [Owner](/iam/concepts/#owner) status or [IAM permissions](/iam/concepts/#permission) allowing you to perform actions in the intended Organization -- Created a [Kubernetes Kapsule cluster](/kubernetes/how-to/create-cluster/) -- Have an existing node pool that you want to migrate - - - Always ensure that your **data is backed up** before performing any operations that could affect it. - - -1. Create the new node pool with the desired configuration either [from the console](/kubernetes/how-to/create-node-pool/) or by using `kubectl`. - - Ensure that the new node pool is properly labeled if necessary. - -2. Run `kubectl get nodes` to check that the new nodes are in a `Ready` state. -3. Cordon the nodes in the old node pool to prevent new pods from being scheduled there. For each node, run: `kubectl cordon ` -4. Drain the nodes to evict the pods gracefully. - - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data` - - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them. - - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. - - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for further information. -5. Run `kubectl get pods -o wide` after draining, to verify that the pods have been rescheduled to the new node pool. -6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. 
From c3a02922db9461a2a896e7159cd6f3c801db66ef Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 17:19:48 +0200 Subject: [PATCH 07/13] feat(k8s): update navigation --- menu/navigation.json | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/menu/navigation.json b/menu/navigation.json index 7cfa159922..966f828ffd 100644 --- a/menu/navigation.json +++ b/menu/navigation.json @@ -1876,16 +1876,8 @@ "slug": "connect-cluster-kubectl" }, { - "label": "Create a new node Kapsule node pool", - "slug": "create-node-pool" - }, - { - "label": "Migrate a Kapsule node pool", - "slug": "migrate-node-pool" - }, - { - "label": "Delete a Kapsule node pool", - "slug": "delete-node-pool" + "label": "Manage Kapsule node pools", + "slug": "manage-node-pools" }, { "label": "Deploy an image from Container Registry", From 90c55b63d410f210d164a977f6a03efe272702ff Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 17:26:37 +0200 Subject: [PATCH 08/13] Apply suggestions from code review --- pages/instances/api-cli/migrating-instances.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pages/instances/api-cli/migrating-instances.mdx b/pages/instances/api-cli/migrating-instances.mdx index 9a4f330b51..95fc85209e 100644 --- a/pages/instances/api-cli/migrating-instances.mdx +++ b/pages/instances/api-cli/migrating-instances.mdx @@ -28,7 +28,7 @@ To do so, you need the Instance’s ID and a valid API key. Do **not** manually change the commercial type of **Kubernetes Kapsule nodes** using the API or CLI. Kubernetes Kapsule nodes **must be managed** through Kubernetes. Modifying node types outside of the recommended method can lead to instability or unexpected behavior. - To change the commercial type of your nodes, create a new node pool with the desired Instance type and [migrate your workloads](/kubernetes/how-to/manage-node-pools/) to the new pool. 
+ To change the commercial type of your nodes, create a new node pool with the desired Instance type and [migrate your workloads](/kubernetes/how-to/manage-node-pools/#how-to-migrate-existing-workloads-to-a-new-kubernets-kapsule-node-pool) to the new pool. From 20aaa100738f4ab5818ff12bb98f39d8988e1a8e Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 17:39:02 +0200 Subject: [PATCH 09/13] Apply suggestions from code review --- pages/kubernetes/how-to/manage-node-pools.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx index 4d5447de5d..e0c8a2b97d 100644 --- a/pages/kubernetes/how-to/manage-node-pools.mdx +++ b/pages/kubernetes/how-to/manage-node-pools.mdx @@ -71,7 +71,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete - The `--delete-emptydir-data` flag is necessary if your pods use emptyDir volumes, but use this option carefully as it will delete the data stored in these volumes. - Refer to the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) for further information. 5. Run `kubectl get pods -o wide` after draining, to verify that the pods have been rescheduled to the new node pool. -6. [Delete the old node pool](/kubernetes/how-to/delete-node-pool/) once you confirm that all workloads are running smoothly on the new node pool. +6. [Delete the old node pool](#how-to-delete-an-existing-kubernetes-kapsule-node-pool) once you confirm that all workloads are running smoothly on the new node pool. 
## How to delete an existing Kubernetes Kapsule node pool From d015d806f966ea92b61ba4c4ecd08a19919df60a Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Mon, 23 Jun 2025 17:25:18 +0200 Subject: [PATCH 10/13] feat(k8s): update wording --- pages/kubernetes/how-to/manage-node-pools.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx index e0c8a2b97d..921cf362b8 100644 --- a/pages/kubernetes/how-to/manage-node-pools.mdx +++ b/pages/kubernetes/how-to/manage-node-pools.mdx @@ -1,10 +1,10 @@ --- meta: title: How to manage Kubetnetes Kapsule node pools - description: Learn how to migrate existing Kubernetes workloads to a new node pool. + description: Learn how to manage Kubernetes Kapsule node pools from the Scaleway console. content: h1: How to migrate Kubetnetes workloads to a new node pools - paragraph: Learn how to migrate existing Kubernetes workloads to a new node pool. + paragraph: Learn how to manage Kubernetes Kapsule node pools from the Scaleway console. tags: kubernetes kapsule kosmos dates: validation: 2025-06-23 @@ -13,7 +13,7 @@ categories: - containers --- -This documentation provides step-by-step instructions on how to manage Kubernetes Kapsule node pools +This documentation provides step-by-step instructions on how to manage Kubernetes Kapsule node pools using the Scaleway console. 
From 4d1245dd4868a571c4b72c6dee0f5d1322e9aaa8 Mon Sep 17 00:00:00 2001
From: Benedikt Rollik
Date: Mon, 23 Jun 2025 18:17:31 +0200
Subject: [PATCH 11/13] Apply suggestions from code review

Co-authored-by: Nox <5402193+nox-404@users.noreply.github.com>
---
 pages/kubernetes/how-to/manage-node-pools.mdx | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx
index 921cf362b8..145f0a9f89 100644
--- a/pages/kubernetes/how-to/manage-node-pools.mdx
+++ b/pages/kubernetes/how-to/manage-node-pools.mdx
@@ -1,9 +1,9 @@
 ---
 meta:
-  title: How to manage Kubetnetes Kapsule node pools
+  title: How to manage Kubernetes Kapsule node pools
   description: Learn how to manage Kubernetes Kapsule node pools from the Scaleway console.
 content:
-  h1: How to migrate Kubetnetes workloads to a new node pools
+  h1: How to manage Kubernetes Kapsule node pools
   paragraph: Learn how to manage Kubernetes Kapsule node pools from the Scaleway console.
 tags: kubernetes kapsule kosmos
 dates:
   validation: 2025-06-23
@@ -46,11 +46,11 @@ This documentation provides step-by-step instructions on how to manage Kubernete
 1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays.
 2. Click the Kapsule cluster name you want to manage. The cluster information page displays.
 3. Click the **Pools** tab to display the pool configuration of the cluster.
-4. Click > **Delete** next to the node pool you want to edit.
+4. Click > **Edit** next to the node pool you want to edit.
 5. Configure the pool:
    - Update pool tags
    - Configure autoscaling
-   - Enable or disable the [autoheal feature](/kubernetes/concepts/#autoheal)
+   - Enable or disable the [autoheal feature](/kubernetes/reference-content/using-kapsule-autoheal-feature/)
 6. Click **Update pool** to update the pool configuration.
## How to migrate existing workloads to a new Kubernets Kapsule node pool
@@ -59,12 +59,15 @@ This documentation provides step-by-step instructions on how to manage Kubernete
     Always ensure that your **data is backed up** before performing any operations that could affect it.
 
-1. Create the new node pool with the desired configuration either [from the console](/kubernetes/how-to/create-node-pool/) or by using `kubectl`.
+1. Create the new node pool with the desired configuration either [from the console](#how-to-create-a-new-kubernetes-kapsule-node-pool) or by using `scw`.
    Ensure that the new node pool is properly labeled if necessary.
 
 2. Run `kubectl get nodes` to check that the new nodes are in a `Ready` state.
 3. Cordon the nodes in the old node pool to prevent new pods from being scheduled there. For each node, run: `kubectl cordon `
+
+    You can use a selector on the pool name label to cordon or drain multiple nodes at the same time if your app allows it (e.g., `kubectl cordon -l k8s.scaleway.com/pool-name=mypoolname`)
+
 4. Drain the nodes to evict the pods gracefully.
    - For each node, run: `kubectl drain --ignore-daemonsets --delete-emptydir-data`
    - The `--ignore-daemonsets` flag is used because daemon sets manage pods across all nodes and will automatically reschedule them.
From 27527057b88081ccdff998c860cd6b31db6794eb Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Tue, 24 Jun 2025 09:20:11 +0200 Subject: [PATCH 12/13] feat(k8s): add links to cli --- pages/kubernetes/how-to/manage-node-pools.mdx | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx index 145f0a9f89..e76f883392 100644 --- a/pages/kubernetes/how-to/manage-node-pools.mdx +++ b/pages/kubernetes/how-to/manage-node-pools.mdx @@ -24,7 +24,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete ## How to create a new Kubernetes Kapsule node pool - Kubernetes Kapsule supports using both **fully isolated** and **controlled isolation** node pools within the same cluster. + Kubernetes Kapsule supports using both [**fully isolated** and **controlled isolation**](kubernetes/reference-content/secure-cluster-with-private-network/#what-is-the-difference-between-controlled-isolation-and-full-isolation) node pools within the same cluster. 1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. @@ -41,6 +41,10 @@ This documentation provides step-by-step instructions on how to manage Kubernete 7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. 8. Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. + + Alternatively, you can use the Scaleway CLI to [create node pools](https://cli.scaleway.com/k8s/#create-a-new-pool-in-a-cluster). + + ## How to edit an existing Kubernetes Kapsule node pool 1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. 
@@ -53,6 +57,10 @@ This documentation provides step-by-step instructions on how to manage Kubernete - Enable or disable the [autoheal feature](/kubernetes/reference-content/using-kapsule-autoheal-feature/) 6. Click **Update pool** to update the pool configuration. + + Alternatively, you can use the Scaleway CLI to [update a node pool](https://cli.scaleway.com/k8s/#update-a-pool-in-a-cluster). + + ## How to migrate existing workloads to a new Kubernets Kapsule node pool @@ -85,4 +93,8 @@ This documentation provides step-by-step instructions on how to manage Kubernete 5. Click **Delete pool** in the pop-up to confirm deletion of the pool. This action will permanently destroy your pool and all its data. - \ No newline at end of file + + + + Alternatively, you can use the Scaleway CLI to [delete a node pool](https://cli.scaleway.com/k8s/#delete-a-pool-in-a-cluster). + \ No newline at end of file From e9f21c003fdfae417300e714a97d9073ca6dc15c Mon Sep 17 00:00:00 2001 From: Benedikt Rollik Date: Tue, 24 Jun 2025 09:32:28 +0200 Subject: [PATCH 13/13] feat(k8s): update wording --- pages/kubernetes/how-to/manage-node-pools.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pages/kubernetes/how-to/manage-node-pools.mdx b/pages/kubernetes/how-to/manage-node-pools.mdx index e76f883392..41fb10fdcf 100644 --- a/pages/kubernetes/how-to/manage-node-pools.mdx +++ b/pages/kubernetes/how-to/manage-node-pools.mdx @@ -24,7 +24,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete ## How to create a new Kubernetes Kapsule node pool - Kubernetes Kapsule supports using both [**fully isolated** and **controlled isolation**](kubernetes/reference-content/secure-cluster-with-private-network/#what-is-the-difference-between-controlled-isolation-and-full-isolation) node pools within the same cluster. + Kubernetes Kapsule supports using both **fully isolated** and **controlled isolation** node pools within the same cluster. 
[Learn more.](/kubernetes/reference-content/secure-cluster-with-private-network/#what-is-the-difference-between-controlled-isolation-and-full-isolation) 1. Navigate to **Kubernetes** under the **Containers** section of the [Scaleway console](https://console.scaleway.com/) side menu. The Kubernetes dashboard displays. @@ -41,7 +41,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete 7. Click **Review** once you have configured the desired pools. A summary of your configuration displays. 8. Verify your configuration and click **Submit** to add the pool(s) to your Kapsule cluster. - + Alternatively, you can use the Scaleway CLI to [create node pools](https://cli.scaleway.com/k8s/#create-a-new-pool-in-a-cluster). @@ -57,7 +57,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete - Enable or disable the [autoheal feature](/kubernetes/reference-content/using-kapsule-autoheal-feature/) 6. Click **Update pool** to update the pool configuration. - + Alternatively, you can use the Scaleway CLI to [update a node pool](https://cli.scaleway.com/k8s/#update-a-pool-in-a-cluster). @@ -67,7 +67,7 @@ This documentation provides step-by-step instructions on how to manage Kubernete Always ensure that your **data is backed up** before performing any operations that could affect it. -1. Create the new node pool with the desired configuration either [from the console](#how-to-create-a-new-kubernetes-kapsule-node-pool) or by using `scw`. +1. Create the new node pool with the desired configuration either [from the console](#how-to-create-a-new-kubernetes-kapsule-node-pool) or by using the Scaleway CLI tool `scw`. Ensure that the new node pool is properly labeled if necessary.