diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden index 0ce1a5ee0f..9d87508264 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to create a new Kubernetes cluster on an account. +Creates a new Kubernetes cluster on a Scaleway account. USAGE: scw k8s cluster create [arg=value ...] @@ -14,28 +14,28 @@ EXAMPLES: ARGS: [project-id] Project ID to use. If none is passed the default project ID will be used - [type] The type of the cluster - name= The name of the cluster - [description] The description of the cluster - [tags.{index}] The tags associated with the cluster - version=latest The Kubernetes version of the cluster - cni=cilium The Container Network Interface (CNI) plugin that will run in the cluster (unknown_cni | cilium | calico | weave | flannel | kilo) - pools.{index}.name The name of the pool - pools.{index}.node-type The node type is the type of Scaleway Instance wanted for the pool - [pools.{index}.placement-group-id] The placement group ID in which all the nodes of the pool will be created - [pools.{index}.autoscaling] The enablement of the autoscaling feature for the pool - pools.{index}.size The size (number of nodes) of the pool - [pools.{index}.min-size] The minimum size of the pool - [pools.{index}.max-size] The maximum size of the pool - [pools.{index}.container-runtime] The container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) - [pools.{index}.autohealing] The enablement of the autohealing feature for the pool - [pools.{index}.tags.{index}] The tags associated with the pool - [pools.{index}.kubelet-args.{key}] The Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental + [type] Type of the cluster + name= Name of the cluster + [description] Description of the cluster + [tags.{index}] Tags associated with the cluster + version=latest Kubernetes version of the cluster + cni=cilium Container Network Interface (CNI) plugin that will run in the cluster (unknown_cni | cilium | calico | weave | flannel | kilo) + pools.{index}.name Name of the pool + pools.{index}.node-type Node type is the type of Scaleway Instance wanted for the pool + [pools.{index}.placement-group-id] Placement group ID in which all the nodes of the pool will be created + [pools.{index}.autoscaling] Defines whether the autoscaling feature is enabled for the pool + pools.{index}.size Size (number of nodes) of the pool + [pools.{index}.min-size] Minimum size of the pool + [pools.{index}.max-size] Maximum size of the pool + [pools.{index}.container-runtime] Container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) + [pools.{index}.autohealing] Defines whether the autohealing feature is enabled for the pool + [pools.{index}.tags.{index}] Tags associated with the pool + [pools.{index}.kubelet-args.{key}] Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental [pools.{index}.upgrade-policy.max-unavailable] The maximum number of nodes that can be not ready at the same time [pools.{index}.upgrade-policy.max-surge] The maximum number of nodes to be created during the upgrade - [pools.{index}.zone] The Zone in which the Pool's node will be spawn in - [pools.{index}.root-volume-type] The system volume disk type (default_volume_type | l_ssd | b_ssd) - [pools.{index}.root-volume-size] The system volume disk size + [pools.{index}.zone] Zone in which the pool's nodes will be spawned + [pools.{index}.root-volume-type] System volume disk type (default_volume_type | l_ssd | b_ssd) + [pools.{index}.root-volume-size] System volume disk size [autoscaler-config.scale-down-disabled] Disable the cluster autoscaler [autoscaler-config.scale-down-delay-after-add] How long after scale up that scale down evaluation resumes [autoscaler-config.estimator] Type of resource estimator to be used in scale up (unknown_estimator | binpacking) @@ -47,8 +47,8 @@ ARGS: [autoscaler-config.scale-down-utilization-threshold] Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down [autoscaler-config.max-graceful-termination-sec] Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node [auto-upgrade.enable] Whether or not auto upgrade is enabled for the cluster - [auto-upgrade.maintenance-window.start-hour] The start hour of the 2-hour maintenance window - [auto-upgrade.maintenance-window.day] The day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) + [auto-upgrade.maintenance-window.start-hour] Start time of the two-hour maintenance window + [auto-upgrade.maintenance-window.day] Day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) 
[feature-gates.{index}] List of feature gates to enable [admission-plugins.{index}] List of admission plugins to enable [open-id-connect-config.issuer-url] URL of the provider which allows the API server to discover public signing keys @@ -57,14 +57,14 @@ ARGS: [open-id-connect-config.username-prefix] Prefix prepended to username [open-id-connect-config.groups-claim.{index}] JWT claim to use as the user's group [open-id-connect-config.groups-prefix] Prefix prepended to group claims - [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID Token + [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID token [apiserver-cert-sans.{index}] Additional Subject Alternative Names for the Kubernetes API server certificate [organization-id] Organization ID to use. If none is passed the default organization ID will be used [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) DEPRECATED ARGS: - [enable-dashboard] The enablement of the Kubernetes Dashboard in the cluster - [ingress] The Ingress Controller that will run in the cluster (unknown_ingress | none | nginx | traefik | traefik2) + [enable-dashboard] Defines if the Kubernetes Dashboard is enabled in the cluster + [ingress] Ingress Controller that will run in the cluster (unknown_ingress | none | nginx | traefik | traefik2) FLAGS: -h, --help help for create diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden index 2eb318f139..bf8d2269d6 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific cluster and all its associated pools and nodes. 
Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. +Deletes a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. USAGE: scw k8s cluster delete [arg=value ...] EXAMPLES: - Delete a given cluster + Delete a cluster scw k8s cluster delete 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster to delete + cluster-id ID of the cluster to delete [with-additional-resources] Set true if you want to delete all volumes (including retain volume type) and loadbalancers whose name start with cluster ID [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden index 1f5a8447e6..e43e4054e6 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific Kubernetes cluster. +Get details about a specific Kubernetes cluster. USAGE: scw k8s cluster get [arg=value ...] 
EXAMPLES: - Get a given cluster + Get information about a cluster scw k8s cluster get 11111111-1111-1111-111111111111 ARGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden index b7af2cedfb..18e29eadd3 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work. +List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. Any upgrade skipping a minor version will not work. USAGE: scw k8s cluster list-available-versions [arg=value ...] EXAMPLES: - List all available versions for a given cluster to upgrade to + List all available versions for a cluster to upgrade to scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster which the available Kuberentes versions will be listed from + cluster-id ID of the cluster from which the available Kubernetes versions will be listed [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden index 5c9e227798..ea8b52de8d 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing Kubernetes clusters in an account. +List all the existing Kubernetes clusters in a specific Region. USAGE: scw k8s cluster list [arg=value ...] EXAMPLES: - List all the clusters on your default region + List all clusters on your default region scw k8s cluster list List the ready clusters on your default region @@ -16,12 +16,12 @@ EXAMPLES: scw k8s cluster list region=fr-par name=cluster1 ARGS: - [project-id] The project ID on which to filter the returned clusters - [order-by] The sort order of the returned clusters (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) - [name] The name on which to filter the returned clusters - [status] The status on which to filter the returned clusters (unknown | creating | ready | deleting | deleted | updating | locked | pool_required) - [type] The type on which to filter the returned clusters - [organization-id] The organization ID on which to filter the returned clusters + [project-id] Project ID on which to filter the returned clusters + [order-by] Sort order of the returned clusters (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) + [name] Name on which to filter the returned clusters + [status] Status on which to filter the returned clusters (unknown | creating | ready | deleting | deleted | updating | locked | 
pool_required) + [type] Type on which to filter the returned clusters + [organization-id] Organization ID on which to filter the returned clusters [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden index dd1bf8963d..8bdd22bff0 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used). +Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to redownload kubeconfig in order to keep interacting with the cluster. USAGE: scw k8s cluster reset-admin-token [arg=value ...] EXAMPLES: - Reset the admin token for a given cluster + Reset the admin token for a cluster scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster of which the admin token will be renewed + cluster-id ID of the cluster on which the admin token will be renewed [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden index dd653291e6..be27dc10b2 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster. +Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. To do so, use the dedicated endpoint. USAGE: scw k8s cluster update [arg=value ...] EXAMPLES: - Enable dashboard on a given cluster + Enable dashboard on a cluster scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true - Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster + Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion ARGS: - cluster-id The ID of the cluster to update - [name] The new name of the cluster - [description] The new description of the cluster - [tags.{index}] The new tags associated with the cluster + cluster-id ID of the cluster to update + [name] New external name of the cluster + [description] New description of the cluster + [tags.{index}] New tags associated with the cluster [autoscaler-config.scale-down-disabled] Disable the cluster autoscaler [autoscaler-config.scale-down-delay-after-add] How long after scale up that scale down evaluation resumes [autoscaler-config.estimator] Type of resource estimator to be used in scale up (unknown_estimator | 
binpacking) @@ -28,8 +28,8 @@ ARGS: [autoscaler-config.scale-down-utilization-threshold] Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down [autoscaler-config.max-graceful-termination-sec] Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node [auto-upgrade.enable] Whether or not auto upgrade is enabled for the cluster - [auto-upgrade.maintenance-window.start-hour] The start hour of the 2-hour maintenance window - [auto-upgrade.maintenance-window.day] The day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) + [auto-upgrade.maintenance-window.start-hour] Start time of the two-hour maintenance window + [auto-upgrade.maintenance-window.day] Day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) [feature-gates.{index}] List of feature gates to enable [admission-plugins.{index}] List of admission plugins to enable [open-id-connect-config.issuer-url] URL of the provider which allows the API server to discover public signing keys @@ -38,13 +38,13 @@ ARGS: [open-id-connect-config.username-prefix] Prefix prepended to username [open-id-connect-config.groups-claim.{index}] JWT claim to use as the user's group [open-id-connect-config.groups-prefix] Prefix prepended to group claims - [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID Token + [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID token [apiserver-cert-sans.{index}] Additional Subject Alternative Names for the Kubernetes API server certificate [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) DEPRECATED ARGS: - [enable-dashboard] The new value of the Kubernetes Dashboard enablement - [ingress] The new Ingress Controller for the cluster (unknown_ingress | none | nginx | traefik | traefik2) + [enable-dashboard] New value of the Kubernetes Dashboard enablement + [ingress] New Ingress Controller for the cluster (unknown_ingress | none | nginx | traefik | traefik2) FLAGS: -h, --help help for update diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden index db0b8f8e3f..b5e301a9fa 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden @@ -1,21 +1,21 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. +Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. USAGE: scw k8s cluster upgrade [arg=value ...] 
EXAMPLES: - Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools) + Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the pools) scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 - Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools) + Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools) scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true ARGS: - cluster-id The ID of the cluster to upgrade - version The new Kubernetes version of the cluster - [upgrade-pools] The enablement of the pools upgrade + cluster-id ID of the cluster to upgrade + version New Kubernetes version of the cluster + [upgrade-pools] Enablement of the pools upgrade [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden index 077a8c6fad..b937ddcecf 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden @@ -10,8 +10,8 @@ USAGE: AVAILABLE COMMANDS: create Create a new cluster delete Delete a cluster - get Get a cluster - list List all the clusters + get Get specific cluster information + list List all clusters list-available-versions List available versions for a cluster reset-admin-token Reset the admin token of a cluster update Update a cluster diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden index ccb382c842..70997ed4e4 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific node. 
Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications. USAGE: scw k8s node delete [arg=value ...] EXAMPLES: - Delete a given node + Delete a node scw k8s node delete 11111111-1111-1111-111111111111 - Delete a given node without evicting workloads + Delete a node without evicting workloads scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true - Replace a given node by a new one + Replace a node by a new one scw k8s node delete 11111111-1111-1111-111111111111 replace=true ARGS: - node-id The ID of the node to replace + node-id ID of the node to replace [skip-drain] Skip draining node from its workload [replace] Add a new node after the deletion of this node [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden index 9576b867b7..914ce3476e 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific Kubernetes node. +Get details about a specific Kubernetes node. USAGE: scw k8s node get [arg=value ...] EXAMPLES: - Get a given node + Get a node scw k8s node get 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the requested node + node-id ID of the requested node [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden index 297e17d20a..676ac40462 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden @@ -1,26 +1,26 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing nodes for a specific Kubernetes cluster. +List all the existing nodes for a specific Kubernetes cluster. USAGE: scw k8s node list [arg=value ...] EXAMPLES: - List all the nodes in the given cluster + List all the nodes in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 - List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster + List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222 - List all ready nodes in the given cluster + List all ready nodes in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ARGS: - cluster-id The cluster ID from which the nodes will be listed from - [pool-id] The pool ID on which to filter the returned nodes - [order-by] The sort order of the returned nodes (created_at_asc | created_at_desc) - [name] The name on which to filter the returned nodes - [status] The status on which to filter the returned nodes (unknown | creating | not_ready | ready | deleting | deleted | locked | rebooting | creation_error | upgrading | starting | registering) + cluster-id Cluster ID from which the nodes will be listed from + [pool-id] Pool ID on which to filter the returned nodes + [order-by] Sort order of the returned nodes (created_at_asc | created_at_desc) + [name] Name on which to filter the returned nodes + [status] Status on which to filter 
the returned nodes (unknown | creating | not_ready | ready | deleting | deleted | locked | rebooting | creation_error | upgrading | starting | registering) [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden index 628120a685..fd24b2881c 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. USAGE: scw k8s node reboot [arg=value ...] EXAMPLES: - Reboot a given node + Reboot a node scw k8s node reboot 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the node to reboot + node-id ID of the node to reboot [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden index 51893d33cf..41762effe0 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. USAGE: scw k8s node replace [arg=value ...] EXAMPLES: - Replace a given node + Replace a node scw k8s node replace 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the node to replace + node-id ID of the node to replace [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden index d10f66e4e7..07696542f5 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden @@ -1,8 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A node (short for worker node) is an abstraction for a Scaleway Instance. -It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. +A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. USAGE: scw k8s node diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden index 28f0145fb6..435ceda6d5 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden @@ -1,38 +1,38 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to create a new pool in a specific Kubernetes cluster. +Create a new pool in a specific Kubernetes cluster. USAGE: scw k8s pool create [arg=value ...] 
EXAMPLES: - Create a pool named bar with 2 DEV1-XL on a given cluster + Create a pool named bar with 2 DEV1-XL on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2 - Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster + Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd - Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster + Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group ARGS: - cluster-id The ID of the cluster in which the pool will be created - name= The name of the pool - node-type=DEV1-M The node type is the type of Scaleway Instance wanted for the pool - [placement-group-id] The placement group ID in which all the nodes of the pool will be created - [autoscaling] The enablement of the autoscaling feature for the pool - size=1 The size (number of nodes) of the pool - [min-size] The minimum size of the pool - [max-size] The maximum size of the pool - [container-runtime] The container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) - [autohealing] The enablement of the autohealing feature for the pool - [tags.{index}] The tags associated with 
the pool - [kubelet-args.{key}] The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental + cluster-id ID of the cluster in which the pool will be created + name= Name of the pool + node-type=DEV1-M Node type is the type of Scaleway Instance wanted for the pool + [placement-group-id] Placement group ID in which all the nodes of the pool will be created + [autoscaling] Defines whether the autoscaling feature is enabled for the pool + size=1 Size (number of nodes) of the pool + [min-size] Minimum size of the pool + [max-size] Maximum size of the pool + [container-runtime] Container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) + [autohealing] Defines whether the autohealing feature is enabled for the pool + [tags.{index}] Tags associated with the pool + [kubelet-args.{key}] Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental [upgrade-policy.max-unavailable] [upgrade-policy.max-surge] - [zone] The Zone in which the Pool's node will be spawn in - [root-volume-type] The system volume disk type (default_volume_type | l_ssd | b_ssd) - [root-volume-size] The system volume disk size + [zone] Zone in which the pool's nodes will be spawned + [root-volume-type] System volume disk type (default_volume_type | l_ssd | b_ssd) + [root-volume-size] System volume disk size [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden index 3f84d7c3f3..ab0aa557ae 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it. +Delete a specific pool from a cluster. All of the pool's nodes will also be deleted. USAGE: scw k8s pool delete [arg=value ...] EXAMPLES: - Delete a given pool + Delete a specific pool scw k8s pool delete 11111111-1111-1111-111111111111 ARGS: - pool-id The ID of the pool to delete + pool-id ID of the pool to delete [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden index 6d65f617b3..cbf1eb8068 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific pool. +Get details about a specific pool in a Kubernetes cluster. USAGE: scw k8s pool get [arg=value ...] @@ -10,7 +10,7 @@ EXAMPLES: scw k8s pool get 11111111-1111-1111-111111111111 ARGS: - pool-id The ID of the requested pool + pool-id ID of the requested pool [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden index a3f674b0a0..d5161b277d 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden @@ -1,28 +1,28 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing pools for a specific Kubernetes cluster. +List all the existing pools for a specific Kubernetes cluster. USAGE: scw k8s pool list [arg=value ...] EXAMPLES: - List all pools for a given cluster + List all pools for a cluster scw k8s pool list cluster-id=11111111-1111-1111-111111111111 - List all scaling pools for a given cluster + List all scaling pools for a cluster scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling - List all pools for a given cluster that contain the word foo in the pool name + List all pools for a cluster that contains the word 'foo' in the pool name scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo - List all pools for a given cluster and order them by ascending creation date + List all pools for a cluster and order them by ascending creation date scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc ARGS: - cluster-id The ID of the cluster from which the pools will be listed from - [order-by] The sort order of the returned pools (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) - [name] The name on which to filter the returned pools - [status] The status on which to filter the returned pools (unknown | ready | deleting | deleted | scaling | warning | locked | upgrading) + cluster-id ID of the cluster from which the pools will be listed from + [order-by] Sort order of the 
returned pools (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) + [name] Name on which to filter the returned pools + [status] Status on which to filter the returned pools (unknown | ready | deleting | deleted | scaling | warning | locked | upgrading) [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden index 8c2c5025f1..a790f10739 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ... +Update attributes of a specific pool, such as size, autoscaling settings, and tags. USAGE: scw k8s pool update [arg=value ...] @@ -16,14 +16,14 @@ EXAMPLES: scw k8s pool update 11111111-1111-1111-111111111111 tags.0=my tags.1=new tags.2=pool ARGS: - pool-id The ID of the pool to update - [autoscaling] The new value for the enablement of autoscaling for the pool - [size] The new size for the pool - [min-size] The new minimun size for the pool - [max-size] The new maximum size for the pool - [autohealing] The new value for the enablement of autohealing for the pool - [tags.{index}] The new tags associated with the pool - [kubelet-args.{key}] The new Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental + pool-id ID of the pool to update + [autoscaling] New value for the enablement of autoscaling for the pool + [size] New size for the pool + [min-size] New minimum size for the pool + [max-size] New maximum size for the pool + [autohealing] New value for the enablement of autohealing for the pool + [tags.{index}] New tags associated with the pool + [kubelet-args.{key}] New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental [upgrade-policy.max-unavailable] [upgrade-policy.max-surge] [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden index dbce9fca21..4c129f4afa 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden @@ -1,17 +1,17 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster. +Upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same as the version of the cluster. USAGE: scw k8s pool upgrade [arg=value ...] EXAMPLES: - Upgrade a given pool to the Kubernetes version 1.24.7 + Upgrade a specific pool to the Kubernetes version 1.24.7 scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7 ARGS: - pool-id The ID of the pool to upgrade - version The new Kubernetes version for the pool + pool-id ID of the pool to upgrade + version New Kubernetes version for the pool [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden index 30282c8243..7b8b306a03 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden @@ -1,8 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. +A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. USAGE: scw k8s pool diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden index f67e16b6c4..42c00bdeb9 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get a specific Kubernetes version and the details about the version. +Get a specific Kubernetes version and the details about the version. USAGE: scw k8s version get [arg=value ...] @@ -10,7 +10,7 @@ EXAMPLES: scw k8s version get 1.24.7 ARGS: - version-name The requested version name + version-name Requested version name [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden index 9a5e4aed6d..65f7045487 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all available versions for the creation of a new Kubernetes cluster. +List all available versions for the creation of a new Kubernetes cluster. USAGE: scw k8s version list [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden index ddff28fb75..7374a72399 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden @@ -1,10 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A version is a vanilla Kubernetes version like `x.y.z`. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. +A version is a vanilla Kubernetes version like `x.y.z`. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. 
USAGE: scw k8s version diff --git a/docs/commands/k8s.md b/docs/commands/k8s.md index 711ddb79b4..85d0527f1e 100644 --- a/docs/commands/k8s.md +++ b/docs/commands/k8s.md @@ -5,8 +5,8 @@ Kapsule API. - [Kapsule cluster management commands](#kapsule-cluster-management-commands) - [Create a new cluster](#create-a-new-cluster) - [Delete a cluster](#delete-a-cluster) - - [Get a cluster](#get-a-cluster) - - [List all the clusters](#list-all-the-clusters) + - [Get specific cluster information](#get-specific-cluster-information) + - [List all clusters](#list-all-clusters) - [List available versions for a cluster](#list-available-versions-for-a-cluster) - [Reset the admin token of a cluster](#reset-the-admin-token-of-a-cluster) - [Update a cluster](#update-a-cluster) @@ -46,7 +46,7 @@ It is composed of different pools, each pool containing the same kind of nodes. ### Create a new cluster -This method allows to create a new Kubernetes cluster on an account. +Creates a new Kubernetes cluster on a Scaleway account. **Usage:** @@ -60,30 +60,30 @@ scw k8s cluster create [arg=value ...] | Name | | Description | |------|---|-------------| | project-id | | Project ID to use. If none is passed the default project ID will be used | -| type | | The type of the cluster | -| name | Required
Default: `` | The name of the cluster | -| description | | The description of the cluster | -| tags.{index} | | The tags associated with the cluster | -| version | Required
Default: `latest` | The Kubernetes version of the cluster | -| cni | Required
Default: `cilium`
One of: `unknown_cni`, `cilium`, `calico`, `weave`, `flannel`, `kilo` | The Container Network Interface (CNI) plugin that will run in the cluster | -| ~~enable-dashboard~~ | Deprecated | The enablement of the Kubernetes Dashboard in the cluster | -| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | The Ingress Controller that will run in the cluster | -| pools.{index}.name | Required | The name of the pool | -| pools.{index}.node-type | Required | The node type is the type of Scaleway Instance wanted for the pool | -| pools.{index}.placement-group-id | | The placement group ID in which all the nodes of the pool will be created | -| pools.{index}.autoscaling | | The enablement of the autoscaling feature for the pool | -| pools.{index}.size | Required | The size (number of nodes) of the pool | -| pools.{index}.min-size | | The minimum size of the pool | -| pools.{index}.max-size | | The maximum size of the pool | -| pools.{index}.container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | The container runtime for the nodes of the pool | -| pools.{index}.autohealing | | The enablement of the autohealing feature for the pool | -| pools.{index}.tags.{index} | | The tags associated with the pool | -| pools.{index}.kubelet-args.{key} | | The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | +| type | | Type of the cluster | +| name | Required
Default: `` | Name of the cluster | +| description | | Description of the cluster | +| tags.{index} | | Tags associated with the cluster | +| version | Required
Default: `latest` | Kubernetes version of the cluster | +| cni | Required
Default: `cilium`
One of: `unknown_cni`, `cilium`, `calico`, `weave`, `flannel`, `kilo` | Container Network Interface (CNI) plugin that will run in the cluster | +| ~~enable-dashboard~~ | Deprecated | Defines if the Kubernetes Dashboard is enabled in the cluster | +| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | Ingress Controller that will run in the cluster | +| pools.{index}.name | Required | Name of the pool | +| pools.{index}.node-type | Required | Node type is the type of Scaleway Instance wanted for the pool | +| pools.{index}.placement-group-id | | Placement group ID in which all the nodes of the pool will be created | +| pools.{index}.autoscaling | | Defines whether the autoscaling feature is enabled for the pool | +| pools.{index}.size | Required | Size (number of nodes) of the pool | +| pools.{index}.min-size | | Minimum size of the pool | +| pools.{index}.max-size | | Maximum size of the pool | +| pools.{index}.container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | Container runtime for the nodes of the pool | +| pools.{index}.autohealing | | Defines whether the autohealing feature is enabled for the pool | +| pools.{index}.tags.{index} | | Tags associated with the pool | +| pools.{index}.kubelet-args.{key} | | Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental | | pools.{index}.upgrade-policy.max-unavailable | | The maximum number of nodes that can be not ready at the same time | | pools.{index}.upgrade-policy.max-surge | | The maximum number of nodes to be created during the upgrade | -| pools.{index}.zone | | The Zone in which the Pool's node will be spawn in | -| pools.{index}.root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | The system volume disk type | -| pools.{index}.root-volume-size | | The system volume disk size | +| pools.{index}.zone | | Zone in which the pool's nodes will be spawned | +| pools.{index}.root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | System volume disk type | +| pools.{index}.root-volume-size | | System volume disk size | | autoscaler-config.scale-down-disabled | | Disable the cluster autoscaler | | autoscaler-config.scale-down-delay-after-add | | How long after scale up that scale down evaluation resumes | | autoscaler-config.estimator | One of: `unknown_estimator`, `binpacking` | Type of resource estimator to be used in scale up | @@ -95,8 +95,8 @@ scw k8s cluster create [arg=value ...] 
| autoscaler-config.scale-down-utilization-threshold | | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down | | autoscaler-config.max-graceful-termination-sec | | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node | | auto-upgrade.enable | | Whether or not auto upgrade is enabled for the cluster | -| auto-upgrade.maintenance-window.start-hour | | The start hour of the 2-hour maintenance window | -| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | The day of the week for the maintenance window | +| auto-upgrade.maintenance-window.start-hour | | Start time of the two-hour maintenance window | +| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | Day of the week for the maintenance window | | feature-gates.{index} | | List of feature gates to enable | | admission-plugins.{index} | | List of admission plugins to enable | | open-id-connect-config.issuer-url | | URL of the provider which allows the API server to discover public signing keys | @@ -105,7 +105,7 @@ scw k8s cluster create [arg=value ...] | open-id-connect-config.username-prefix | | Prefix prepended to username | | open-id-connect-config.groups-claim.{index} | | JWT claim to use as the user's group | | open-id-connect-config.groups-prefix | | Prefix prepended to group claims | -| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID Token | +| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID token | | apiserver-cert-sans.{index} | | Additional Subject Alternative Names for the Kubernetes API server certificate | | organization-id | | Organization ID to use. 
If none is passed the default organization ID will be used | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -129,7 +129,7 @@ scw k8s cluster create name=bar version=1.24.7 tags.0=tag1 tags.1=tag2 cni=calic ### Delete a cluster -This method allows to delete a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. +Deletes a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. **Usage:** @@ -142,7 +142,7 @@ scw k8s cluster delete [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to delete | +| cluster-id | Required | ID of the cluster to delete | | with-additional-resources | | Set true if you want to delete all volumes (including retain volume type) and loadbalancers whose name start with cluster ID | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -150,7 +150,7 @@ scw k8s cluster delete [arg=value ...] **Examples:** -Delete a given cluster +Delete a cluster ``` scw k8s cluster delete 11111111-1111-1111-111111111111 ``` @@ -158,9 +158,9 @@ scw k8s cluster delete 11111111-1111-1111-111111111111 -### Get a cluster +### Get specific cluster information -This method allows to get details about a specific Kubernetes cluster. +Get details about a specific Kubernetes cluster. **Usage:** ``` scw k8s cluster get [arg=value ...] ``` @@ -180,7 +180,7 @@ scw k8s cluster get [arg=value ...] **Examples:** -Get a given cluster +Get cluster information ``` scw k8s cluster get 11111111-1111-1111-111111111111 ``` @@ -188,9 +188,9 @@ scw k8s cluster get 11111111-1111-1111-111111111111 -### List all the clusters +### List all clusters -This method allows to list all the existing Kubernetes clusters in an account. +List all the existing Kubernetes clusters in a specific Region. **Usage:** ``` scw k8s cluster list [arg=value ...] ``` @@ -203,19 +203,19 @@ scw k8s cluster list [arg=value ...] 
| Name | | Description | |------|---|-------------| -| project-id | | The project ID on which to filter the returned clusters | -| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | The sort order of the returned clusters | -| name | | The name on which to filter the returned clusters | -| status | One of: `unknown`, `creating`, `ready`, `deleting`, `deleted`, `updating`, `locked`, `pool_required` | The status on which to filter the returned clusters | -| type | | The type on which to filter the returned clusters | -| organization-id | | The organization ID on which to filter the returned clusters | +| project-id | | Project ID on which to filter the returned clusters | +| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | Sort order of the returned clusters | +| name | | Name on which to filter the returned clusters | +| status | One of: `unknown`, `creating`, `ready`, `deleting`, `deleted`, `updating`, `locked`, `pool_required` | Status on which to filter the returned clusters | +| type | | Type on which to filter the returned clusters | +| organization-id | | Organization ID on which to filter the returned clusters | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config | **Examples:** -List all the clusters on your default region +List all clusters on your default region ``` scw k8s cluster list ``` @@ -235,7 +235,7 @@ scw k8s cluster list region=fr-par name=cluster1 ### List available versions for a cluster -This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work. +List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. Any upgrade skipping a minor version will not work. **Usage:** ``` scw k8s cluster list-available-versions [arg=value ...] ``` @@ -248,14 +248,14 @@ scw k8s cluster list-available-versions [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster which the available Kuberentes versions will be listed from | +| cluster-id | Required | ID of the cluster which the available Kubernetes versions will be listed from | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -List all available versions for a given cluster to upgrade to +List all available versions for a cluster to upgrade to ``` scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ``` @@ -265,7 +265,7 @@ scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ### Reset the admin token of a cluster -This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used). +Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to redownload kubeconfig in order to keep interacting with the cluster. **Usage:** @@ -278,14 +278,14 @@ scw k8s cluster reset-admin-token [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster of which the admin token will be renewed | +| cluster-id | Required | ID of the cluster on which the admin token will be renewed | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Reset the admin token for a given cluster +Reset the admin token for a cluster ``` scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ``` @@ -295,7 +295,7 @@ scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ### Update a cluster -This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster. +Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. To do so, use the dedicated endpoint. **Usage:** @@ -308,10 +308,10 @@ scw k8s cluster update [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to update | -| name | | The new name of the cluster | -| description | | The new description of the cluster | -| tags.{index} | | The new tags associated with the cluster | +| cluster-id | Required | ID of the cluster to update | +| name | | New external name of the cluster | +| description | | New description of the cluster | +| tags.{index} | | New tags associated with the cluster | | autoscaler-config.scale-down-disabled | | Disable the cluster autoscaler | | autoscaler-config.scale-down-delay-after-add | | How long after scale up that scale down evaluation resumes | | autoscaler-config.estimator | One of: `unknown_estimator`, `binpacking` | Type of resource estimator to be used in scale up | @@ -322,11 +322,11 @@ scw k8s cluster update [arg=value ...] 
| autoscaler-config.scale-down-unneeded-time | | How long a node should be unneeded before it is eligible for scale down | | autoscaler-config.scale-down-utilization-threshold | | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down | | autoscaler-config.max-graceful-termination-sec | | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node | -| ~~enable-dashboard~~ | Deprecated | The new value of the Kubernetes Dashboard enablement | -| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | The new Ingress Controller for the cluster | +| ~~enable-dashboard~~ | Deprecated | New value of the Kubernetes Dashboard enablement | +| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | New Ingress Controller for the cluster | | auto-upgrade.enable | | Whether or not auto upgrade is enabled for the cluster | -| auto-upgrade.maintenance-window.start-hour | | The start hour of the 2-hour maintenance window | -| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | The day of the week for the maintenance window | +| auto-upgrade.maintenance-window.start-hour | | Start time of the two-hour maintenance window | +| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | Day of the week for the maintenance window | | feature-gates.{index} | | List of feature gates to enable | | admission-plugins.{index} | | List of admission plugins to enable | | open-id-connect-config.issuer-url | | URL of the provider which allows the API server to discover public signing keys | @@ -335,7 +335,7 @@ scw k8s cluster update [arg=value ...] | open-id-connect-config.username-prefix | | Prefix prepended to username | | open-id-connect-config.groups-claim.{index} | | JWT claim to use as the user's group | | open-id-connect-config.groups-prefix | | Prefix prepended to group claims | -| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID Token | +| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID token | | apiserver-cert-sans.{index} | | Additional Subject Alternative Names for the Kubernetes API server certificate | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -343,12 +343,12 @@ scw k8s cluster update [arg=value ...] **Examples:** -Enable dashboard on a given cluster +Enable dashboard on a cluster ``` scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true ``` -Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster +Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster ``` scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion ``` @@ -358,7 +358,7 @@ scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterF ### Upgrade a cluster -This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. +Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. **Usage:** @@ -371,21 +371,21 @@ scw k8s cluster upgrade [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to upgrade | -| version | Required | The new Kubernetes version of the cluster | -| upgrade-pools | | The enablement of the pools upgrade | +| cluster-id | Required | ID of the cluster to upgrade | +| version | Required | New Kubernetes version of the cluster | +| upgrade-pools | | Enablement of the pools upgrade | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools) +Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the pools) ``` scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 ``` -Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools) +Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools) ``` scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true ``` @@ -523,15 +523,13 @@ scw k8s kubeconfig uninstall 11111111-1111-1111-1111-111111111111 ## Kapsule node management commands -A node (short for worker node) is an abstraction for a Scaleway Instance. -It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. +A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. ### Delete a node in a cluster -This method allows to delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications. **Usage:** @@ -544,7 +542,7 @@ scw k8s node delete [arg=value ...] 
| Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the node to replace | +| node-id | Required | ID of the node to replace | | skip-drain | | Skip draining node from its workload | | replace | | Add a new node after the deletion of this node | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -553,17 +551,17 @@ scw k8s node delete [arg=value ...] **Examples:** -Delete a given node +Delete a node ``` scw k8s node delete 11111111-1111-1111-111111111111 ``` -Delete a given node without evicting workloads +Delete a node without evicting workloads ``` scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true ``` -Replace a given node by a new one +Replace a node by a new one ``` scw k8s node delete 11111111-1111-1111-111111111111 replace=true ``` @@ -573,7 +571,7 @@ scw k8s node delete 11111111-1111-1111-111111111111 replace=true ### Get a node in a cluster -This method allows to get details about a specific Kubernetes node. +Get details about a specific Kubernetes node. **Usage:** @@ -586,14 +584,14 @@ scw k8s node get [arg=value ...] | Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the requested node | +| node-id | Required | ID of the requested node | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Get a given node +Get a node ``` scw k8s node get 11111111-1111-1111-111111111111 ``` @@ -603,7 +601,7 @@ scw k8s node get 11111111-1111-1111-111111111111 ### List all the nodes in a cluster -This method allows to list all the existing nodes for a specific Kubernetes cluster. +List all the existing nodes for a specific Kubernetes cluster. **Usage:** @@ -616,28 +614,28 @@ scw k8s node list [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The cluster ID from which the nodes will be listed from | -| pool-id | | The pool ID on which to filter the returned nodes | -| order-by | One of: `created_at_asc`, `created_at_desc` | The sort order of the returned nodes | -| name | | The name on which to filter the returned nodes | -| status | One of: `unknown`, `creating`, `not_ready`, `ready`, `deleting`, `deleted`, `locked`, `rebooting`, `creation_error`, `upgrading`, `starting`, `registering` | The status on which to filter the returned nodes | +| cluster-id | Required | Cluster ID from which the nodes will be listed from | +| pool-id | | Pool ID on which to filter the returned nodes | +| order-by | One of: `created_at_asc`, `created_at_desc` | Sort order of the returned nodes | +| name | | Name on which to filter the returned nodes | +| status | One of: `unknown`, `creating`, `not_ready`, `ready`, `deleting`, `deleted`, `locked`, `rebooting`, `creation_error`, `upgrading`, `starting`, `registering` | Status on which to filter the returned nodes | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config | **Examples:** -List all the nodes in the given cluster +List all the nodes in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 ``` -List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster +List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222 ``` -List all ready nodes in the given cluster +List all ready nodes in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ``` @@ -647,7 +645,7 @@ scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ### Reboot a node in a cluster -This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. **Usage:** @@ -660,14 +658,14 @@ scw k8s node reboot [arg=value ...] | Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the node to reboot | +| node-id | Required | ID of the node to reboot | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config |


**Examples:**

-Reboot a given node
+Reboot a node
```
scw k8s node reboot 11111111-1111-1111-111111111111
```

@@ -677,7 +675,7 @@ scw k8s node reboot 11111111-1111-1111-111111111111

### Replace a node in a cluster

-This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.
+Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications.

**Usage:**

@@ -690,14 +688,14 @@ scw k8s node replace [arg=value ...]

| Name | | Description |
|------|---|-------------|
-| node-id | Required | The ID of the node to replace |
+| node-id | Required | ID of the node to replace |
| region | Default: `fr-par`&#13;
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Replace a given node +Replace a node ``` scw k8s node replace 11111111-1111-1111-111111111111 ``` @@ -737,15 +735,13 @@ scw k8s node wait 11111111-1111-1111-1111-111111111111 ## Kapsule pool management commands -A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. +A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. ### Create a new pool in a cluster -This method allows to create a new pool in a specific Kubernetes cluster. +Create a new pool in a specific Kubernetes cluster. **Usage:** @@ -758,40 +754,40 @@ scw k8s pool create [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster in which the pool will be created | -| name | Required
Default: `` | The name of the pool | -| node-type | Required
Default: `DEV1-M` | The node type is the type of Scaleway Instance wanted for the pool | -| placement-group-id | | The placement group ID in which all the nodes of the pool will be created | -| autoscaling | | The enablement of the autoscaling feature for the pool | -| size | Required
Default: `1` | The size (number of nodes) of the pool | -| min-size | | The minimum size of the pool | -| max-size | | The maximum size of the pool | -| container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | The container runtime for the nodes of the pool | -| autohealing | | The enablement of the autohealing feature for the pool | -| tags.{index} | | The tags associated with the pool | -| kubelet-args.{key} | | The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | +| cluster-id | Required | ID of the cluster in which the pool will be created | +| name | Required
Default: `` | Name of the pool | +| node-type | Required
Default: `DEV1-M` | Node type is the type of Scaleway Instance wanted for the pool | +| placement-group-id | | Placement group ID in which all the nodes of the pool will be created | +| autoscaling | | Defines whether the autoscaling feature is enabled for the pool | +| size | Required
Default: `1` | Size (number of nodes) of the pool | +| min-size | | Minimum size of the pool | +| max-size | | Maximum size of the pool | +| container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | Container runtime for the nodes of the pool | +| autohealing | | Defines whether the autohealing feature is enabled for the pool | +| tags.{index} | | Tags associated with the pool | +| kubelet-args.{key} | | Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | | upgrade-policy.max-unavailable | | | | upgrade-policy.max-surge | | | -| zone | | The Zone in which the Pool's node will be spawn in | -| root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | The system volume disk type | -| root-volume-size | | The system volume disk size | +| zone | | Zone in which the pool's nodes will be spawned | +| root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | System volume disk type | +| root-volume-size | | System volume disk size | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Create a pool named bar with 2 DEV1-XL on a given cluster +Create a pool named bar with 2 DEV1-XL on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2 ``` -Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster +Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd ``` -Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster +Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group ``` @@ -801,7 +797,7 @@ scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node- ### Delete a pool in a cluster -This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it. +Delete a specific pool from a cluster. All of the pool's nodes will also be deleted. **Usage:** @@ -814,14 +810,14 @@ scw k8s pool delete [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the pool to delete | +| pool-id | Required | ID of the pool to delete | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Delete a given pool +Delete a specific pool ``` scw k8s pool delete 11111111-1111-1111-111111111111 ``` @@ -831,7 +827,7 @@ scw k8s pool delete 11111111-1111-1111-111111111111 ### Get a pool in a cluster -This method allows to get details about a specific pool. +Get details about a specific pool in a Kubernetes cluster. **Usage:** @@ -844,7 +840,7 @@ scw k8s pool get [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the requested pool | +| pool-id | Required | ID of the requested pool | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -861,7 +857,7 @@ scw k8s pool get 11111111-1111-1111-111111111111 ### List all the pools in a cluster -This method allows to list all the existing pools for a specific Kubernetes cluster. +List all the existing pools for a specific Kubernetes cluster. **Usage:** @@ -874,32 +870,32 @@ scw k8s pool list [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster from which the pools will be listed from | -| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | The sort order of the returned pools | -| name | | The name on which to filter the returned pools | -| status | One of: `unknown`, `ready`, `deleting`, `deleted`, `scaling`, `warning`, `locked`, `upgrading` | The status on which to filter the returned pools | +| cluster-id | Required | ID of the cluster from which the pools will be listed from | +| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | Sort order of the returned pools | +| name | | Name on which to filter the returned pools | +| status | One of: `unknown`, `ready`, `deleting`, `deleted`, `scaling`, `warning`, `locked`, `upgrading` | Status on which to filter the returned pools | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config |


**Examples:**

-List all pools for a given cluster
+List all pools for a cluster
```
scw k8s pool list cluster-id=11111111-1111-1111-111111111111
```

-List all scaling pools for a given cluster
+List all scaling pools for a cluster
```
scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling
```

-List all pools for a given cluster that contain the word foo in the pool name
+List all pools for a cluster that contain the word 'foo' in the pool name
```
scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo
```

-List all pools for a given cluster and order them by ascending creation date
+List all pools for a cluster and order them by ascending creation date
```
scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc
```

@@ -909,7 +905,7 @@ scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at

### Update a pool in a cluster

-This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ...
+Update attributes of a specific pool, such as size, autoscaling settings, and tags.

**Usage:**

@@ -922,14 +918,14 @@ scw k8s pool update [arg=value ...]

| Name | | Description |
|------|---|-------------|
-| pool-id | Required | The ID of the pool to update |
-| autoscaling | | The new value for the enablement of autoscaling for the pool |
-| size | | The new size for the pool |
-| min-size | | The new minimun size for the pool |
-| max-size | | The new maximum size for the pool |
-| autohealing | | The new value for the enablement of autohealing for the pool |
-| tags.{index} | | The new tags associated with the pool |
-| kubelet-args.{key} | | The new Kubelet arguments to be used by this pool. &#13;
Note that this feature is to be considered as experimental |
+| pool-id | Required | ID of the pool to update |
+| autoscaling | | New value for the enablement of autoscaling for the pool |
+| size | | New size for the pool |
+| min-size | | New minimum size for the pool |
+| max-size | | New maximum size for the pool |
+| autohealing | | New value for the enablement of autohealing for the pool |
+| tags.{index} | | New tags associated with the pool |
+| kubelet-args.{key} | | New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental |
| upgrade-policy.max-unavailable | | |
| upgrade-policy.max-surge | | |
| region | Default: `fr-par`&#13;
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config |


@@ -958,7 +954,7 @@ scw k8s pool update 11111111-1111-1111-111111111111 tags.0=my tags.1=new tags.2=

### Upgrade a pool in a cluster

-This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster.
+Upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same as the version of the cluster.

**Usage:**

@@ -971,15 +967,15 @@ scw k8s pool upgrade [arg=value ...]

| Name | | Description |
|------|---|-------------|
-| pool-id | Required | The ID of the pool to upgrade |
-| version | Required | The new Kubernetes version for the pool |
+| pool-id | Required | ID of the pool to upgrade |
+| version | Required | New Kubernetes version for the pool |
| region | Default: `fr-par`&#13;
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Upgrade a given pool to the Kubernetes version 1.24.7 +Upgrade a specific pool to the Kubernetes version 1.24.7 ``` scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7 ``` @@ -1019,17 +1015,13 @@ scw k8s pool wait 11111111-1111-1111-1111-111111111111 ## Available Kubernetes version commands -A version is a vanilla Kubernetes version like `x.y.z`. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. +A version is a vanilla Kubernetes version like `x.y.z`. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. ### Get details about a specific version -This method allows to get a specific Kubernetes version and the details about the version. +Get a specific Kubernetes version and the details about the version. **Usage:** @@ -1042,7 +1034,7 @@ scw k8s version get [arg=value ...] | Name | | Description | |------|---|-------------| -| version-name | Required | The requested version name | +| version-name | Required | Requested version name | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -1059,7 +1051,7 @@ scw k8s version get 1.24.7 ### List all available versions -This method allows to list all available versions for the creation of a new Kubernetes cluster. +List all available versions for the creation of a new Kubernetes cluster. **Usage:** diff --git a/internal/namespaces/k8s/v1/k8s_cli.go b/internal/namespaces/k8s/v1/k8s_cli.go index 39b5d38b9b..1ce5e12f1c 100644 --- a/internal/namespaces/k8s/v1/k8s_cli.go +++ b/internal/namespaces/k8s/v1/k8s_cli.go @@ -70,9 +70,7 @@ It is composed of different pools, each pool containing the same kind of nodes. func k8sPool() *core.Command { return &core.Command{ Short: `Kapsule pool management commands`, - Long: `A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. + Long: `A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. `, Namespace: "k8s", Resource: "pool", @@ -82,9 +80,7 @@ A "default pool" is automatically created with every cluster. func k8sNode() *core.Command { return &core.Command{ Short: `Kapsule node management commands`, - Long: `A node (short for worker node) is an abstraction for a Scaleway Instance. 
-It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. + Long: `A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. `, Namespace: "k8s", Resource: "node", @@ -94,11 +90,7 @@ Please note that Kubernetes nodes cannot be accessed with ssh. func k8sVersion() *core.Command { return &core.Command{ Short: `Available Kubernetes version commands`, - Long: `A version is a vanilla Kubernetes version like ` + "`" + `x.y.z` + "`" + `. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. + Long: `A version is a vanilla Kubernetes version like ` + "`" + `x.y.z` + "`" + `. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. 
`, Namespace: "k8s", Resource: "version", @@ -107,8 +99,8 @@ Also each version have a different set of container runtimes, CNIs, ingresses, f func k8sClusterList() *core.Command { return &core.Command{ - Short: `List all the clusters`, - Long: `This method allows to list all the existing Kubernetes clusters in an account.`, + Short: `List all clusters`, + Long: `List all the existing Kubernetes clusters in a specific Region.`, Namespace: "k8s", Resource: "cluster", Verb: "list", @@ -117,14 +109,14 @@ func k8sClusterList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "project-id", - Short: `The project ID on which to filter the returned clusters`, + Short: `Project ID on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned clusters`, + Short: `Sort order of the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -132,14 +124,14 @@ func k8sClusterList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned clusters`, + Short: `Name on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned clusters`, + Short: `Status on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -147,14 +139,14 @@ func k8sClusterList() *core.Command { }, { Name: "type", - Short: `The type on which to filter the returned clusters`, + Short: `Type on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "organization-id", - Short: `The organization ID on which to filter the returned clusters`, + Short: `Organization ID on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -180,7 +172,7 @@ func k8sClusterList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all 
the clusters on your default region", + Short: "List all clusters on your default region", ArgsJSON: `null`, }, { @@ -239,7 +231,7 @@ func k8sClusterList() *core.Command { func k8sClusterCreate() *core.Command { return &core.Command{ Short: `Create a new cluster`, - Long: `This method allows to create a new Kubernetes cluster on an account.`, + Long: `Creates a new Kubernetes cluster on a Scaleway account.`, Namespace: "k8s", Resource: "cluster", Verb: "create", @@ -249,14 +241,14 @@ func k8sClusterCreate() *core.Command { core.ProjectIDArgSpec(), { Name: "type", - Short: `The type of the cluster`, + Short: `Type of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "name", - Short: `The name of the cluster`, + Short: `Name of the cluster`, Required: true, Deprecated: false, Positional: false, @@ -264,28 +256,28 @@ func k8sClusterCreate() *core.Command { }, { Name: "description", - Short: `The description of the cluster`, + Short: `Description of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The tags associated with the cluster`, + Short: `Tags associated with the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "version", - Short: `The Kubernetes version of the cluster`, + Short: `Kubernetes version of the cluster`, Required: true, Deprecated: false, Positional: false, }, { Name: "cni", - Short: `The Container Network Interface (CNI) plugin that will run in the cluster`, + Short: `Container Network Interface (CNI) plugin that will run in the cluster`, Required: true, Deprecated: false, Positional: false, @@ -293,14 +285,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "enable-dashboard", - Short: `The enablement of the Kubernetes Dashboard in the cluster`, + Short: `Defines if the Kubernetes Dashboard is enabled in the cluster`, Required: false, Deprecated: true, Positional: false, }, { Name: "ingress", - Short: `The Ingress Controller that 
will run in the cluster`, + Short: `Ingress Controller that will run in the cluster`, Required: false, Deprecated: true, Positional: false, @@ -308,56 +300,56 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.name", - Short: `The name of the pool`, + Short: `Name of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.node-type", - Short: `The node type is the type of Scaleway Instance wanted for the pool`, + Short: `Node type is the type of Scaleway Instance wanted for the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.placement-group-id", - Short: `The placement group ID in which all the nodes of the pool will be created`, + Short: `Placement group ID in which all the nodes of the pool will be created`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.autoscaling", - Short: `The enablement of the autoscaling feature for the pool`, + Short: `Defines whether the autoscaling feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.size", - Short: `The size (number of nodes) of the pool`, + Short: `Size (number of nodes) of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.min-size", - Short: `The minimum size of the pool`, + Short: `Minimum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.max-size", - Short: `The maximum size of the pool`, + Short: `Maximum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.container-runtime", - Short: `The container runtime for the nodes of the pool`, + Short: `Container runtime for the nodes of the pool`, Required: false, Deprecated: false, Positional: false, @@ -365,21 +357,21 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.autohealing", - Short: `The enablement of the autohealing 
feature for the pool`, + Short: `Defines whether the autohealing feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.tags.{index}", - Short: `The tags associated with the pool`, + Short: `Tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.kubelet-args.{key}", - Short: `The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -400,14 +392,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.zone", - Short: `The Zone in which the Pool's node will be spawn in`, + Short: `Zone in which the pool's nodes will be spawned`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.root-volume-type", - Short: `The system volume disk type`, + Short: `System volume disk type`, Required: false, Deprecated: false, Positional: false, @@ -415,7 +407,7 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.root-volume-size", - Short: `The system volume disk size`, + Short: `System volume disk size`, Required: false, Deprecated: false, Positional: false, @@ -501,14 +493,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "auto-upgrade.maintenance-window.start-hour", - Short: `The start hour of the 2-hour maintenance window`, + Short: `Start time of the two-hour maintenance window`, Required: false, Deprecated: false, Positional: false, }, { Name: "auto-upgrade.maintenance-window.day", - Short: `The day of the week for the maintenance window`, + Short: `Day of the week for the maintenance window`, Required: false, Deprecated: false, Positional: false, @@ -572,7 +564,7 @@ func k8sClusterCreate() *core.Command { }, { Name: "open-id-connect-config.required-claim.{index}", - Short: 
`Multiple key=value pairs that describes a required claim in the ID Token`,
+			Short:      `Multiple key=value pairs that describes a required claim in the ID token`,
 			Required:   false,
 			Deprecated: false,
 			Positional: false,
@@ -610,8 +602,8 @@ func k8sClusterCreate() *core.Command {

 func k8sClusterGet() *core.Command {
 	return &core.Command{
-		Short: `Get a cluster`,
-		Long:  `This method allows to get details about a specific Kubernetes cluster.`,
+		Short: `Get specific cluster information`,
+		Long:  `Get details about a specific Kubernetes cluster.`,
 		Namespace: "k8s",
 		Resource:  "cluster",
 		Verb:      "get",
@@ -637,7 +629,7 @@
 		},
 		Examples: []*core.Example{
 			{
-				Short: "Get a given cluster",
+				Short: "Get cluster information",
 				Raw:   `scw k8s cluster get 11111111-1111-1111-111111111111`,
 			},
 		},
@@ -647,7 +639,7 @@
 func k8sClusterUpdate() *core.Command {
 	return &core.Command{
 		Short: `Update a cluster`,
-		Long:  `This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster.`,
+		Long: `Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. &#13;
To do so, use the dedicated endpoint.`, Namespace: "k8s", Resource: "cluster", Verb: "update", @@ -656,28 +648,28 @@ func k8sClusterUpdate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to update`, + Short: `ID of the cluster to update`, Required: true, Deprecated: false, Positional: true, }, { Name: "name", - Short: `The new name of the cluster`, + Short: `New external name of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "description", - Short: `The new description of the cluster`, + Short: `New description of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The new tags associated with the cluster`, + Short: `New tags associated with the cluster`, Required: false, Deprecated: false, Positional: false, @@ -756,14 +748,14 @@ func k8sClusterUpdate() *core.Command { }, { Name: "enable-dashboard", - Short: `The new value of the Kubernetes Dashboard enablement`, + Short: `New value of the Kubernetes Dashboard enablement`, Required: false, Deprecated: true, Positional: false, }, { Name: "ingress", - Short: `The new Ingress Controller for the cluster`, + Short: `New Ingress Controller for the cluster`, Required: false, Deprecated: true, Positional: false, @@ -778,14 +770,14 @@ func k8sClusterUpdate() *core.Command { }, { Name: "auto-upgrade.maintenance-window.start-hour", - Short: `The start hour of the 2-hour maintenance window`, + Short: `Start time of the two-hour maintenance window`, Required: false, Deprecated: false, Positional: false, }, { Name: "auto-upgrade.maintenance-window.day", - Short: `The day of the week for the maintenance window`, + Short: `Day of the week for the maintenance window`, Required: false, Deprecated: false, Positional: false, @@ -849,7 +841,7 @@ func k8sClusterUpdate() *core.Command { }, { Name: "open-id-connect-config.required-claim.{index}", - Short: `Multiple key=value pairs that describes a 
required claim in the ID Token`, + Short: `Multiple key=value pairs that describe a required claim in the ID token`, Required: false, Deprecated: false, Positional: false, @@ -873,11 +865,11 @@ func k8sClusterUpdate() *core.Command { }, Examples: []*core.Example{ { - Short: "Enable dashboard on a given cluster", + Short: "Enable dashboard on a cluster", Raw: `scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true`, }, { - Short: "Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster", + Short: "Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster", Raw: `scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion`, }, }, @@ -887,7 +879,7 @@ func k8sClusterUpdate() *core.Command { func k8sClusterDelete() *core.Command { return &core.Command{ Short: `Delete a cluster`, - Long: `This method allows to delete a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster.`, + Long: `Deletes a specific cluster and all its associated pools and nodes. 
Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster.`, Namespace: "k8s", Resource: "cluster", Verb: "delete", @@ -896,7 +888,7 @@ func k8sClusterDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to delete`, + Short: `ID of the cluster to delete`, Required: true, Deprecated: false, Positional: true, @@ -920,7 +912,7 @@ func k8sClusterDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given cluster", + Short: "Delete a cluster", Raw: `scw k8s cluster delete 11111111-1111-1111-111111111111`, }, }, @@ -930,7 +922,7 @@ func k8sClusterDelete() *core.Command { func k8sClusterUpgrade() *core.Command { return &core.Command{ Short: `Upgrade a cluster`, - Long: `This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version.`, + Long: `Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version.`, Namespace: "k8s", Resource: "cluster", Verb: "upgrade", @@ -939,21 +931,21 @@ func k8sClusterUpgrade() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to upgrade`, + Short: `ID of the cluster to upgrade`, Required: true, Deprecated: false, Positional: true, }, { Name: "version", - Short: `The new Kubernetes version of the cluster`, + Short: `New Kubernetes version of the cluster`, Required: true, Deprecated: false, Positional: false, }, { Name: "upgrade-pools", - Short: `The enablement of the pools upgrade`, + Short: `Enablement of the pools upgrade`, Required: false, Deprecated: false, Positional: false, @@ -970,11 +962,11 @@ func k8sClusterUpgrade() *core.Command { }, Examples: []*core.Example{ { - Short: "Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools)", + Short: "Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the 
pools)", Raw: `scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7`, }, { - Short: "Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools)", + Short: "Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools)", Raw: `scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true`, }, }, @@ -984,7 +976,7 @@ func k8sClusterUpgrade() *core.Command { func k8sClusterListAvailableVersions() *core.Command { return &core.Command{ Short: `List available versions for a cluster`, - Long: `This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work.`, + Long: `List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. 
Any upgrade skipping a minor version will not work.`, Namespace: "k8s", Resource: "cluster", Verb: "list-available-versions", @@ -993,7 +985,7 @@ func k8sClusterListAvailableVersions() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster which the available Kuberentes versions will be listed from`, + Short: `ID of the cluster which the available Kubernetes versions will be listed from`, Required: true, Deprecated: false, Positional: true, @@ -1010,7 +1002,7 @@ func k8sClusterListAvailableVersions() *core.Command { }, Examples: []*core.Example{ { - Short: "List all available versions for a given cluster to upgrade to", + Short: "List all available versions for a cluster to upgrade to", Raw: `scw k8s cluster list-available-versions 11111111-1111-1111-111111111111`, }, }, @@ -1034,7 +1026,7 @@ func k8sClusterListAvailableVersions() *core.Command { func k8sClusterResetAdminToken() *core.Command { return &core.Command{ Short: `Reset the admin token of a cluster`, - Long: `This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used).`, + Long: `Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. 
Note that you will need to redownload kubeconfig in order to keep interacting with the cluster.`, Namespace: "k8s", Resource: "cluster", Verb: "reset-admin-token", @@ -1043,7 +1035,7 @@ func k8sClusterResetAdminToken() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster of which the admin token will be renewed`, + Short: `ID of the cluster on which the admin token will be renewed`, Required: true, Deprecated: false, Positional: true, @@ -1066,7 +1058,7 @@ func k8sClusterResetAdminToken() *core.Command { }, Examples: []*core.Example{ { - Short: "Reset the admin token for a given cluster", + Short: "Reset the admin token for a cluster", Raw: `scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111`, }, }, @@ -1076,7 +1068,7 @@ func k8sClusterResetAdminToken() *core.Command { func k8sPoolList() *core.Command { return &core.Command{ Short: `List all the pools in a cluster`, - Long: `This method allows to list all the existing pools for a specific Kubernetes cluster.`, + Long: `List all the existing pools for a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "list", @@ -1085,14 +1077,14 @@ func k8sPoolList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster from which the pools will be listed from`, + Short: `ID of the cluster from which the pools will be listed from`, Required: true, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned pools`, + Short: `Sort order of the returned pools`, Required: false, Deprecated: false, Positional: false, @@ -1100,14 +1092,14 @@ func k8sPoolList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned pools`, + Short: `Name on which to filter the returned pools`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned pools`, + Short: `Status on which to filter 
the returned pools`, Required: false, Deprecated: false, Positional: false, @@ -1134,19 +1126,19 @@ func k8sPoolList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all pools for a given cluster", + Short: "List all pools for a cluster", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111`, }, { - Short: "List all scaling pools for a given cluster", + Short: "List all scaling pools for a cluster", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling`, }, { - Short: "List all pools for a given cluster that contain the word foo in the pool name", + Short: "List all pools for a cluster that contains the word 'foo' in the pool name", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo`, }, { - Short: "List all pools for a given cluster and order them by ascending creation date", + Short: "List all pools for a cluster and order them by ascending creation date", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc`, }, }, @@ -1215,7 +1207,7 @@ func k8sPoolList() *core.Command { func k8sPoolCreate() *core.Command { return &core.Command{ Short: `Create a new pool in a cluster`, - Long: `This method allows to create a new pool in a specific Kubernetes cluster.`, + Long: `Create a new pool in a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "create", @@ -1224,14 +1216,14 @@ func k8sPoolCreate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster in which the pool will be created`, + Short: `ID of the cluster in which the pool will be created`, Required: true, Deprecated: false, Positional: false, }, { Name: "name", - Short: `The name of the pool`, + Short: `Name of the pool`, Required: true, Deprecated: false, Positional: false, @@ -1239,49 +1231,49 @@ func k8sPoolCreate() *core.Command { }, { Name: "node-type", - Short: `The node type is the type of Scaleway Instance wanted for the 
pool`, + Short: `Node type is the type of Scaleway Instance wanted for the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "placement-group-id", - Short: `The placement group ID in which all the nodes of the pool will be created`, + Short: `Placement group ID in which all the nodes of the pool will be created`, Required: false, Deprecated: false, Positional: false, }, { Name: "autoscaling", - Short: `The enablement of the autoscaling feature for the pool`, + Short: `Defines whether the autoscaling feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "size", - Short: `The size (number of nodes) of the pool`, + Short: `Size (number of nodes) of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "min-size", - Short: `The minimum size of the pool`, + Short: `Minimum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "max-size", - Short: `The maximum size of the pool`, + Short: `Maximum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "container-runtime", - Short: `The container runtime for the nodes of the pool`, + Short: `Container runtime for the nodes of the pool`, Required: false, Deprecated: false, Positional: false, @@ -1289,21 +1281,21 @@ func k8sPoolCreate() *core.Command { }, { Name: "autohealing", - Short: `The enablement of the autohealing feature for the pool`, + Short: `Defines whether the autohealing feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The tags associated with the pool`, + Short: `Tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "kubelet-args.{key}", - Short: `The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -1322,14 +1314,14 @@ func k8sPoolCreate() *core.Command { }, { Name: "zone", - Short: `The Zone in which the Pool's node will be spawn in`, + Short: `Zone in which the pool's nodes will be spawned`, Required: false, Deprecated: false, Positional: false, }, { Name: "root-volume-type", - Short: `The system volume disk type`, + Short: `System volume disk type`, Required: false, Deprecated: false, Positional: false, @@ -1337,7 +1329,7 @@ func k8sPoolCreate() *core.Command { }, { Name: "root-volume-size", - Short: `The system volume disk size`, + Short: `System volume disk size`, Required: false, Deprecated: false, Positional: false, @@ -1354,15 +1346,15 @@ func k8sPoolCreate() *core.Command { }, Examples: []*core.Example{ { - Short: "Create a pool named bar with 2 DEV1-XL on a given cluster", + Short: "Create a pool named bar with 2 DEV1-XL on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2`, }, { - Short: "Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster", + Short: "Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd`, }, { - Short: "Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster", + Short: "Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the 
pool on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group`, }, }, @@ -1372,7 +1364,7 @@ func k8sPoolCreate() *core.Command { func k8sPoolGet() *core.Command { return &core.Command{ Short: `Get a pool in a cluster`, - Long: `This method allows to get details about a specific pool.`, + Long: `Get details about a specific pool in a Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "get", @@ -1381,7 +1373,7 @@ func k8sPoolGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the requested pool`, + Short: `ID of the requested pool`, Required: true, Deprecated: false, Positional: true, @@ -1408,7 +1400,7 @@ func k8sPoolGet() *core.Command { func k8sPoolUpgrade() *core.Command { return &core.Command{ Short: `Upgrade a pool in a cluster`, - Long: `This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster.`, + Long: `Upgrade the Kubernetes version of a specific pool. 
Note that this will work when the targeted version is the same as the version of the cluster.`, Namespace: "k8s", Resource: "pool", Verb: "upgrade", @@ -1417,14 +1409,14 @@ func k8sPoolUpgrade() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to upgrade`, + Short: `ID of the pool to upgrade`, Required: true, Deprecated: false, Positional: true, }, { Name: "version", - Short: `The new Kubernetes version for the pool`, + Short: `New Kubernetes version for the pool`, Required: true, Deprecated: false, Positional: false, @@ -1441,7 +1433,7 @@ func k8sPoolUpgrade() *core.Command { }, Examples: []*core.Example{ { - Short: "Upgrade a given pool to the Kubernetes version 1.24.7", + Short: "Upgrade a specific pool to the Kubernetes version 1.24.7", Raw: `scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7`, }, }, @@ -1451,7 +1443,7 @@ func k8sPoolUpdate() *core.Command { func k8sPoolUpdate() *core.Command { return &core.Command{ Short: `Update a pool in a cluster`, - Long: `This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ...`, + Long: `Update attributes of a specific pool, such as size, autoscaling settings, and tags.`, Namespace: "k8s", Resource: "pool", Verb: "update", @@ -1460,56 +1452,56 @@ func k8sPoolUpdate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to update`, + Short: `ID of the pool to update`, Required: true, Deprecated: false, Positional: true, }, { Name: "autoscaling", - Short: `The new value for the enablement of autoscaling for the pool`, + Short: `New value for the enablement of autoscaling for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "size", - Short: `The new size for the pool`, + Short: `New size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "min-size", - Short: `The new minimun size for the pool`, + Short: 
`New minimum size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "max-size", - Short: `The new maximum size for the pool`, + Short: `New maximum size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "autohealing", - Short: `The new value for the enablement of autohealing for the pool`, + Short: `New value for the enablement of autohealing for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The new tags associated with the pool`, + Short: `New tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "kubelet-args.{key}", - Short: `The new Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -1556,7 +1548,7 @@ func k8sPoolUpdate() *core.Command { func k8sPoolDelete() *core.Command { return &core.Command{ Short: `Delete a pool in a cluster`, - Long: `This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it.`, + Long: `Delete a specific pool from a cluster. 
All of the pool's nodes will also be deleted.`, Namespace: "k8s", Resource: "pool", Verb: "delete", @@ -1565,7 +1557,7 @@ func k8sPoolDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to delete`, + Short: `ID of the pool to delete`, Required: true, Deprecated: false, Positional: true, @@ -1582,7 +1574,7 @@ func k8sPoolDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given pool", + Short: "Delete a specific pool", Raw: `scw k8s pool delete 11111111-1111-1111-111111111111`, }, }, @@ -1592,7 +1584,7 @@ func k8sPoolDelete() *core.Command { func k8sNodeList() *core.Command { return &core.Command{ Short: `List all the nodes in a cluster`, - Long: `This method allows to list all the existing nodes for a specific Kubernetes cluster.`, + Long: `List all the existing nodes for a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "node", Verb: "list", @@ -1601,21 +1593,21 @@ func k8sNodeList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The cluster ID from which the nodes will be listed from`, + Short: `Cluster ID from which the nodes will be listed from`, Required: true, Deprecated: false, Positional: false, }, { Name: "pool-id", - Short: `The pool ID on which to filter the returned nodes`, + Short: `Pool ID on which to filter the returned nodes`, Required: false, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned nodes`, + Short: `Sort order of the returned nodes`, Required: false, Deprecated: false, Positional: false, @@ -1623,14 +1615,14 @@ func k8sNodeList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned nodes`, + Short: `Name on which to filter the returned nodes`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned nodes`, + Short: `Status on which to filter the returned nodes`, Required: false, 
Deprecated: false, Positional: false, @@ -1657,15 +1649,15 @@ func k8sNodeList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all the nodes in the given cluster", + Short: "List all the nodes in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111`, }, { - Short: "List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster", + Short: "List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222`, }, { - Short: "List all ready nodes in the given cluster", + Short: "List all ready nodes in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready`, }, }, @@ -1707,7 +1699,7 @@ func k8sNodeList() *core.Command { func k8sNodeGet() *core.Command { return &core.Command{ Short: `Get a node in a cluster`, - Long: `This method allows to get details about a specific Kubernetes node.`, + Long: `Get details about a specific Kubernetes node.`, Namespace: "k8s", Resource: "node", Verb: "get", @@ -1716,7 +1708,7 @@ func k8sNodeGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the requested node`, + Short: `ID of the requested node`, Required: true, Deprecated: false, Positional: true, @@ -1733,7 +1725,7 @@ func k8sNodeGet() *core.Command { }, Examples: []*core.Example{ { - Short: "Get a given node", + Short: "Get a node", Raw: `scw k8s node get 11111111-1111-1111-111111111111`, }, }, @@ -1743,7 +1735,7 @@ func k8sNodeGet() *core.Command { func k8sNodeReplace() *core.Command { return &core.Command{ Short: `Replace a node in a cluster`, - Long: `This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. 
Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "replace", @@ -1752,7 +1744,7 @@ func k8sNodeReplace() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to replace`, + Short: `ID of the node to replace`, Required: true, Deprecated: false, Positional: true, @@ -1769,7 +1761,7 @@ func k8sNodeReplace() *core.Command { }, Examples: []*core.Example{ { - Short: "Replace a given node", + Short: "Replace a node", Raw: `scw k8s node replace 11111111-1111-1111-111111111111`, }, }, @@ -1779,7 +1771,7 @@ func k8sNodeReplace() *core.Command { func k8sNodeReboot() *core.Command { return &core.Command{ Short: `Reboot a node in a cluster`, - Long: `This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. 
Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "reboot", @@ -1788,7 +1780,7 @@ func k8sNodeReboot() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to reboot`, + Short: `ID of the node to reboot`, Required: true, Deprecated: false, Positional: true, @@ -1805,7 +1797,7 @@ func k8sNodeReboot() *core.Command { }, Examples: []*core.Example{ { - Short: "Reboot a given node", + Short: "Reboot a node", Raw: `scw k8s node reboot 11111111-1111-1111-111111111111`, }, }, @@ -1815,7 +1807,7 @@ func k8sNodeReboot() *core.Command { func k8sNodeDelete() *core.Command { return &core.Command{ Short: `Delete a node in a cluster`, - Long: `This method allows to delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Delete a specific node. 
Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "delete", @@ -1824,7 +1816,7 @@ func k8sNodeDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to replace`, + Short: `ID of the node to replace`, Required: true, Deprecated: false, Positional: true, @@ -1855,15 +1847,15 @@ func k8sNodeDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given node", + Short: "Delete a node", Raw: `scw k8s node delete 11111111-1111-1111-111111111111`, }, { - Short: "Delete a given node without evicting workloads", + Short: "Delete a node without evicting workloads", Raw: `scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true`, }, { - Short: "Replace a given node by a new one", + Short: "Replace a node by a new one", Raw: `scw k8s node delete 11111111-1111-1111-111111111111 replace=true`, }, }, @@ -1873,7 +1865,7 @@ func k8sNodeDelete() *core.Command { func k8sVersionList() *core.Command { return &core.Command{ Short: `List all available versions`, - Long: `This method allows to list all available versions for the creation of a new Kubernetes cluster.`, + Long: `List all available versions for the creation of a new Kubernetes cluster.`, Namespace: "k8s", Resource: "version", Verb: "list", @@ -1925,7 +1917,7 @@ func k8sVersionList() *core.Command { func k8sVersionGet() *core.Command { return &core.Command{ Short: `Get details about a specific version`, - Long: `This method allows to get a specific Kubernetes version and the details about the version.`, + Long: `Get a specific Kubernetes version and the details about the version.`, Namespace: "k8s", Resource: "version", Verb: "get", @@ -1934,7 +1926,7 @@ func k8sVersionGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "version-name", - Short: `The requested version name`, + Short: `Requested 
version name`, Required: true, Deprecated: false, Positional: true,