From 07262ec59ee4e37bcc7445a1d00279794007ae02 Mon Sep 17 00:00:00 2001
From: Feidias Ioannidis
Date: Thu, 9 Oct 2025 17:13:36 +0000
Subject: [PATCH 1/2] Pass whether the cluster is a Pathways cluster to the
 Kueue manager to provision Kueue flavors accordingly

---
 goldens/Basic_cluster_create.txt             |  95 +++++++++++++++++
 goldens/Cluster_create_private.txt           | 106 ++++++++++++++++++-
 goldens/Cluster_create_with_gb200-4.txt      |  95 +++++++++++++++++
 goldens/NAP_cluster-create.txt               |  95 +++++++++++++++++
 goldens/NAP_cluster-create_with_pathways.txt | 106 ++++++++++++++++++-
 src/xpk/commands/cluster.py                  |   1 +
 src/xpk/core/kueue_manager.py                |   3 +
 7 files changed, 499 insertions(+), 2 deletions(-)

diff --git a/goldens/Basic_cluster_create.txt b/goldens/Basic_cluster_create.txt
index 3d81f2f98..17711906f 100644
--- a/goldens/Basic_cluster_create.txt
+++ b/goldens/Basic_cluster_create.txt
@@ -79,6 +79,101 @@ kubectl get deployment kueue-controller-manager -n kueue-system -o jsonpath='{.s
 kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-sigs/kueue/releases/download/v0.12.2/manifests.yaml
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
+[XPK] Applying following Kueue resources:
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "1xtpu7x-8"
+spec:
+  nodeLabels: {"cloud.google.com/gke-tpu-accelerator": "tpu7x", "cloud.google.com/gke-tpu-topology": "2x2x1"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: AdmissionCheck
+metadata:
+  name: dws-prov
+spec:
+  controllerName: kueue.x-k8s.io/provisioning-request
+  parameters:
+    apiGroup: kueue.x-k8s.io
+    kind: ProvisioningRequestConfig
+    name: dws-config
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ProvisioningRequestConfig
+metadata:
+  name: dws-config
+spec:
+  provisioningClassName: queued-provisioning.gke.io
+  podSetUpdates:
+    nodeSelector:
+    - key: autoscaling.gke.io/provisioning-request
+      valueFromProvisioningClassDetail: ResizeRequestName
+  managedResources:
+  - google.com/tpu
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+  name: "cluster-queue"
+spec:
+  preemption:
+    reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+    withinClusterQueue: LowerPriority
+  namespaceSelector: {} # match all.
+  resourceGroups: [{'coveredResources': ['google.com/tpu'], 'flavors': [{'name': '1xtpu7x-8', 'resources': [{'name': 'google.com/tpu', 'nominalQuota': 4}]}]}]
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+  namespace: default
+  name: multislice-queue
+spec:
+  clusterQueue: cluster-queue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
 [XPK] Task: `Applying Kueue Custom Resources` is implemented by the following command not running since it is a dry run.
 kubectl apply -f a1fe8e014a200d6489b8871301a9e80de7e6f45e94b61ad0e60f40f254711bec
 [XPK] Task: `Count total nodes` is implemented by the following command not running since it is a dry run.
diff --git a/goldens/Cluster_create_private.txt b/goldens/Cluster_create_private.txt
index 4a1042530..2d70c868f 100644
--- a/goldens/Cluster_create_private.txt
+++ b/goldens/Cluster_create_private.txt
@@ -84,8 +84,112 @@ kubectl get deployment kueue-controller-manager -n kueue-system -o jsonpath='{.s
 kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-sigs/kueue/releases/download/v0.12.2/manifests.yaml
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
+[XPK] Applying following Kueue resources:
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "1xv5p-8"
+spec:
+  nodeLabels: {"cloud.google.com/gke-tpu-accelerator": "tpu-v5p-slice", "cloud.google.com/gke-tpu-topology": "2x2x1"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "cpu-user"
+spec:
+  nodeLabels: {"cloud.google.com/gke-nodepool": "cpu-np"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: AdmissionCheck
+metadata:
+  name: dws-prov
+spec:
+  controllerName: kueue.x-k8s.io/provisioning-request
+  parameters:
+    apiGroup: kueue.x-k8s.io
+    kind: ProvisioningRequestConfig
+    name: dws-config
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ProvisioningRequestConfig
+metadata:
+  name: dws-config
+spec:
+  provisioningClassName: queued-provisioning.gke.io
+  podSetUpdates:
+    nodeSelector:
+    - key: autoscaling.gke.io/provisioning-request
+      valueFromProvisioningClassDetail: ResizeRequestName
+  managedResources:
+  - google.com/tpu
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+  name: "cluster-queue"
+spec:
+  preemption:
+    reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+    withinClusterQueue: LowerPriority
+  namespaceSelector: {} # match all.
+  resourceGroups: [{'coveredResources': ['google.com/tpu'], 'flavors': [{'name': '1xv5p-8', 'resources': [{'name': 'google.com/tpu', 'nominalQuota': 4}]}]}, {'coveredResources': ['cpu', 'memory'], 'flavors': [{'name': 'cpu-user', 'resources': [{'name': 'cpu', 'nominalQuota': 480}, {'name': 'memory', 'nominalQuota': '2000G'}]}]}]
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+  namespace: default
+  name: multislice-queue
+spec:
+  clusterQueue: cluster-queue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
 [XPK] Task: `Applying Kueue Custom Resources` is implemented by the following command not running since it is a dry run.
-kubectl apply -f 02867423642d631296009c1c55aee0eb89304e530f89f1f7beecb629fef962c7
+kubectl apply -f 0ff2bce892606d1497f21fc7b2cea78a4ee103094ce0f509211f3f9730536ad6
 [XPK] Task: `Count total nodes` is implemented by the following command not running since it is a dry run.
 kubectl get node --no-headers | wc -l
 [XPK] Try 1: Updating Kueue Controller Manager resources
diff --git a/goldens/Cluster_create_with_gb200-4.txt b/goldens/Cluster_create_with_gb200-4.txt
index 8ae7d06e4..60cd62458 100644
--- a/goldens/Cluster_create_with_gb200-4.txt
+++ b/goldens/Cluster_create_with_gb200-4.txt
@@ -83,6 +83,101 @@ kubectl get deployment kueue-controller-manager -n kueue-system -o jsonpath='{.s
 kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-sigs/kueue/releases/download/v0.12.2/manifests.yaml
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
+[XPK] Applying following Kueue resources:
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "1xgb200-4"
+spec:
+  nodeLabels: {"cloud.google.com/gke-accelerator": "nvidia-gb200"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: AdmissionCheck
+metadata:
+  name: dws-prov
+spec:
+  controllerName: kueue.x-k8s.io/provisioning-request
+  parameters:
+    apiGroup: kueue.x-k8s.io
+    kind: ProvisioningRequestConfig
+    name: dws-config
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ProvisioningRequestConfig
+metadata:
+  name: dws-config
+spec:
+  provisioningClassName: queued-provisioning.gke.io
+  podSetUpdates:
+    nodeSelector:
+    - key: autoscaling.gke.io/provisioning-request
+      valueFromProvisioningClassDetail: ResizeRequestName
+  managedResources:
+  - nvidia.com/gpu
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+  name: "cluster-queue"
+spec:
+  preemption:
+    reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+    withinClusterQueue: LowerPriority
+  namespaceSelector: {} # match all.
+  resourceGroups: [{'coveredResources': ['nvidia.com/gpu'], 'flavors': [{'name': '1xgb200-4', 'resources': [{'name': 'nvidia.com/gpu', 'nominalQuota': 8}]}]}]
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+  namespace: default
+  name: multislice-queue
+spec:
+  clusterQueue: cluster-queue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
 [XPK] Task: `Applying Kueue Custom Resources` is implemented by the following command not running since it is a dry run.
 kubectl apply -f f807069b73747a423ec0d1915b2e919cfde400b01654de15746b566709b80f7e
 [XPK] Task: `Count total nodes` is implemented by the following command not running since it is a dry run.
diff --git a/goldens/NAP_cluster-create.txt b/goldens/NAP_cluster-create.txt
index d5f396f5f..f268f3fac 100644
--- a/goldens/NAP_cluster-create.txt
+++ b/goldens/NAP_cluster-create.txt
@@ -90,6 +90,101 @@ kubectl get deployment kueue-controller-manager -n kueue-system -o jsonpath='{.s
 kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-sigs/kueue/releases/download/v0.12.2/manifests.yaml
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
+[XPK] Applying following Kueue resources:
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "1xtpu7x-8"
+spec:
+  nodeLabels: {"cloud.google.com/gke-tpu-accelerator": "tpu7x"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: AdmissionCheck
+metadata:
+  name: dws-prov
+spec:
+  controllerName: kueue.x-k8s.io/provisioning-request
+  parameters:
+    apiGroup: kueue.x-k8s.io
+    kind: ProvisioningRequestConfig
+    name: dws-config
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ProvisioningRequestConfig
+metadata:
+  name: dws-config
+spec:
+  provisioningClassName: queued-provisioning.gke.io
+  podSetUpdates:
+    nodeSelector:
+    - key: autoscaling.gke.io/provisioning-request
+      valueFromProvisioningClassDetail: ResizeRequestName
+  managedResources:
+  - google.com/tpu
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+  name: "cluster-queue"
+spec:
+  preemption:
+    reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+    withinClusterQueue: LowerPriority
+  namespaceSelector: {} # match all.
+  resourceGroups: [{'coveredResources': ['google.com/tpu'], 'flavors': [{'name': '1xtpu7x-8', 'resources': [{'name': 'google.com/tpu', 'nominalQuota': 4}]}]}]
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+  namespace: default
+  name: multislice-queue
+spec:
+  clusterQueue: cluster-queue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
 [XPK] Task: `Applying Kueue Custom Resources` is implemented by the following command not running since it is a dry run.
 kubectl apply -f e5fccf0957dcb7f60400bb4e28ce8c5fc251a9aeb6d67793dc119554d13dc900
 [XPK] Task: `Count total nodes` is implemented by the following command not running since it is a dry run.
diff --git a/goldens/NAP_cluster-create_with_pathways.txt b/goldens/NAP_cluster-create_with_pathways.txt
index 5944de17a..45173a824 100644
--- a/goldens/NAP_cluster-create_with_pathways.txt
+++ b/goldens/NAP_cluster-create_with_pathways.txt
@@ -91,8 +91,112 @@ kubectl get deployment kueue-controller-manager -n kueue-system -o jsonpath='{.s
 kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-sigs/kueue/releases/download/v0.12.2/manifests.yaml
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
+[XPK] Applying following Kueue resources:
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "1xtpu7x-8"
+spec:
+  nodeLabels: {"cloud.google.com/gke-tpu-accelerator": "tpu7x"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ResourceFlavor
+metadata:
+  name: "cpu-user"
+spec:
+  nodeLabels: {"cloud.google.com/gke-nodepool": "cpu-np"}
+
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: AdmissionCheck
+metadata:
+  name: dws-prov
+spec:
+  controllerName: kueue.x-k8s.io/provisioning-request
+  parameters:
+    apiGroup: kueue.x-k8s.io
+    kind: ProvisioningRequestConfig
+    name: dws-config
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ProvisioningRequestConfig
+metadata:
+  name: dws-config
+spec:
+  provisioningClassName: queued-provisioning.gke.io
+  podSetUpdates:
+    nodeSelector:
+    - key: autoscaling.gke.io/provisioning-request
+      valueFromProvisioningClassDetail: ResizeRequestName
+  managedResources:
+  - google.com/tpu
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: ClusterQueue
+metadata:
+  name: "cluster-queue"
+spec:
+  preemption:
+    reclaimWithinCohort: Never # Don't preempt other queues in the cohort.
+    withinClusterQueue: LowerPriority
+  namespaceSelector: {} # match all.
+  resourceGroups: [{'coveredResources': ['google.com/tpu'], 'flavors': [{'name': '1xtpu7x-8', 'resources': [{'name': 'google.com/tpu', 'nominalQuota': 4}]}]}, {'coveredResources': ['cpu', 'memory'], 'flavors': [{'name': 'cpu-user', 'resources': [{'name': 'cpu', 'nominalQuota': 480}, {'name': 'memory', 'nominalQuota': '2000G'}]}]}]
+
+---
+apiVersion: kueue.x-k8s.io/v1beta1
+kind: LocalQueue
+metadata:
+  namespace: default
+  name: multislice-queue
+spec:
+  clusterQueue: cluster-queue
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-low
+value: 100
+globalDefault: false
+description: "Very Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: low
+value: 250
+globalDefault: false
+description: "Low"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium
+value: 500
+globalDefault: false
+description: "Medium"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high
+value: 750
+globalDefault: false
+description: "High"
+---
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: very-high
+value: 1000
+globalDefault: false
+description: "Very High"
 [XPK] Task: `Applying Kueue Custom Resources` is implemented by the following command not running since it is a dry run.
-kubectl apply -f e5fccf0957dcb7f60400bb4e28ce8c5fc251a9aeb6d67793dc119554d13dc900
+kubectl apply -f edda4d2ffefedaabd6f77eb6d07a05c1bac829ed90a4c3983b21ded280136557
 [XPK] Task: `Count total nodes` is implemented by the following command not running since it is a dry run.
 kubectl get node --no-headers | wc -l
 [XPK] Try 1: Updating Kueue Controller Manager resources
diff --git a/src/xpk/commands/cluster.py b/src/xpk/commands/cluster.py
index 7ca174214..f21ba929a 100644
--- a/src/xpk/commands/cluster.py
+++ b/src/xpk/commands/cluster.py
@@ -1219,6 +1219,7 @@ def install_kueue(args, system: SystemCharacteristics, autoprovisioning_config):
           num_slices=args.num_slices,
           memory_limit=args.memory_limit,
           cpu_limit=args.cpu_limit,
+          is_pathways_cluster=args.enable_pathways,
       ),
   )
 
diff --git a/src/xpk/core/kueue_manager.py b/src/xpk/core/kueue_manager.py
index 3e3328975..55a29944c 100644
--- a/src/xpk/core/kueue_manager.py
+++ b/src/xpk/core/kueue_manager.py
@@ -19,6 +19,7 @@
 from typing import Optional, List, Dict, Any
 import json
 from jinja2 import Environment, FileSystemLoader
+from ..utils.execution_context import is_dry_run
 
 from .capacity import B200_DEVICE_TYPE, H100_MEGA_DEVICE_TYPE, H200_DEVICE_TYPE
 from .scheduling import (
@@ -330,6 +331,8 @@ def __build_template_context(
 
   def __apply_manifest(self, manifest: str) -> int:
     task = "Applying Kueue Custom Resources"
+    if is_dry_run():
+      xpk_print(f"Applying following Kueue resources:\n{manifest}")
     tmp_file = write_tmp_file(manifest)
     command = f"kubectl apply -f {tmp_file}"
     return run_command_with_updates(command, task)

From 0d76d37f97ac491c909111ce45037c369345ed6d Mon Sep 17 00:00:00 2001
From: Feidias Ioannidis
Date: Fri, 10 Oct 2025 08:16:53 +0000
Subject: [PATCH 2/2] Remove new line

---
 goldens/Basic_cluster_create.txt             | 1 -
 goldens/Cluster_create_private.txt           | 1 -
 goldens/Cluster_create_with_gb200-4.txt      | 1 -
 goldens/NAP_cluster-create.txt               | 1 -
 goldens/NAP_cluster-create_with_pathways.txt | 1 -
 src/xpk/core/kueue_manager.py                | 2 +-
 6 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/goldens/Basic_cluster_create.txt b/goldens/Basic_cluster_create.txt
index 17711906f..9dc8aafb6 100644
--- a/goldens/Basic_cluster_create.txt
+++ b/goldens/Basic_cluster_create.txt
@@ -80,7 +80,6 @@ kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-s
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
 [XPK] Applying following Kueue resources:
-
 ---
 apiVersion: kueue.x-k8s.io/v1beta1
 kind: ResourceFlavor
diff --git a/goldens/Cluster_create_private.txt b/goldens/Cluster_create_private.txt
index 2d70c868f..a92cdd5bf 100644
--- a/goldens/Cluster_create_private.txt
+++ b/goldens/Cluster_create_private.txt
@@ -85,7 +85,6 @@ kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-s
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
 [XPK] Applying following Kueue resources:
-
 ---
 apiVersion: kueue.x-k8s.io/v1beta1
 kind: ResourceFlavor
diff --git a/goldens/Cluster_create_with_gb200-4.txt b/goldens/Cluster_create_with_gb200-4.txt
index 60cd62458..070d91d1c 100644
--- a/goldens/Cluster_create_with_gb200-4.txt
+++ b/goldens/Cluster_create_with_gb200-4.txt
@@ -84,7 +84,6 @@ kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-s
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
 [XPK] Applying following Kueue resources:
-
 ---
 apiVersion: kueue.x-k8s.io/v1beta1
 kind: ResourceFlavor
diff --git a/goldens/NAP_cluster-create.txt b/goldens/NAP_cluster-create.txt
index f268f3fac..dbdab1a55 100644
--- a/goldens/NAP_cluster-create.txt
+++ b/goldens/NAP_cluster-create.txt
@@ -91,7 +91,6 @@ kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-s
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
 [XPK] Applying following Kueue resources:
-
 ---
 apiVersion: kueue.x-k8s.io/v1beta1
 kind: ResourceFlavor
diff --git a/goldens/NAP_cluster-create_with_pathways.txt b/goldens/NAP_cluster-create_with_pathways.txt
index 45173a824..a6e69a411 100644
--- a/goldens/NAP_cluster-create_with_pathways.txt
+++ b/goldens/NAP_cluster-create_with_pathways.txt
@@ -92,7 +92,6 @@ kubectl apply --server-side --force-conflicts -f https://github.com/kubernetes-s
 [XPK] Task: `Wait for Kueue to be available` is implemented by the following command not running since it is a dry run.
 kubectl wait deploy/kueue-controller-manager -nkueue-system --for=condition=available --timeout=10m
 [XPK] Applying following Kueue resources:
-
 ---
 apiVersion: kueue.x-k8s.io/v1beta1
 kind: ResourceFlavor
diff --git a/src/xpk/core/kueue_manager.py b/src/xpk/core/kueue_manager.py
index 55a29944c..298a3aefe 100644
--- a/src/xpk/core/kueue_manager.py
+++ b/src/xpk/core/kueue_manager.py
@@ -332,7 +332,7 @@ def __build_template_context(
   def __apply_manifest(self, manifest: str) -> int:
     task = "Applying Kueue Custom Resources"
     if is_dry_run():
-      xpk_print(f"Applying following Kueue resources:\n{manifest}")
+      xpk_print(f"Applying following Kueue resources:{manifest}")
     tmp_file = write_tmp_file(manifest)
     command = f"kubectl apply -f {tmp_file}"
     return run_command_with_updates(command, task)
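
The pathways goldens above differ from the non-pathways ones only by the extra cpu-user ResourceFlavor and the cpu/memory resource group, which is exactly what the new is_pathways_cluster flag controls. Below is a minimal, self-contained sketch of that branch; build_flavors is a hypothetical stand-in for the template-context logic in KueueManager (the real implementation renders Jinja templates in src/xpk/core/kueue_manager.py), while the flavor names, node labels, and quotas are copied from the goldens.

from dataclasses import dataclass


@dataclass
class KueueConfig:
  num_slices: int
  memory_limit: str
  cpu_limit: int
  is_pathways_cluster: bool = False


def build_flavors(config: KueueConfig) -> list[dict]:
  # Every cluster gets a flavor for its accelerator node pools.
  flavors = [{
      'name': '1xtpu7x-8',
      'nodeLabels': {'cloud.google.com/gke-tpu-accelerator': 'tpu7x'},
  }]
  if config.is_pathways_cluster:
    # Pathways clusters also schedule CPU-only workloads on a dedicated
    # node pool, so they get an extra cpu-user flavor backed by cpu/memory
    # quota (see the NAP_cluster-create_with_pathways golden above).
    flavors.append({
        'name': 'cpu-user',
        'nodeLabels': {'cloud.google.com/gke-nodepool': 'cpu-np'},
    })
  return flavors


print(build_flavors(KueueConfig(1, '2000G', 480, is_pathways_cluster=True)))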
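
The dry-run echo that produces the "[XPK] Applying following Kueue resources:" blocks in the goldens boils down to the sketch below. is_dry_run, xpk_print, write_tmp_file, and run_command_with_updates are the real xpk helper names from the diff, but their bodies here are simplified stubs so the example runs on its own.

import tempfile

DRY_RUN = True  # stands in for xpk's --dry-run execution context


def is_dry_run() -> bool:
  return DRY_RUN


def xpk_print(message: str) -> None:
  print(f'[XPK] {message}')


def write_tmp_file(content: str) -> str:
  # Writes the manifest to a temp file and returns its path.
  with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    f.write(content)
    return f.name


def run_command_with_updates(command: str, task: str) -> int:
  # Stub: in a dry run xpk only logs the command instead of executing it.
  xpk_print(f'Task: `{task}` is implemented by the following command not running since it is a dry run.')
  print(command)
  return 0


def apply_manifest(manifest: str) -> int:
  task = 'Applying Kueue Custom Resources'
  if is_dry_run():
    # PATCH 2/2 drops the explicit '\n' because the rendered manifest
    # already starts with its own newline, avoiding a blank line in logs.
    xpk_print(f'Applying following Kueue resources:{manifest}')
  tmp_file = write_tmp_file(manifest)
  command = f'kubectl apply -f {tmp_file}'
  return run_command_with_updates(command, task)


apply_manifest('\n---\napiVersion: kueue.x-k8s.io/v1beta1\nkind: ResourceFlavor\n')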