From 85a71c5c4d77d63d356170012573ba6f67152bde Mon Sep 17 00:00:00 2001 From: Anish Asthana Date: Thu, 25 May 2023 19:51:31 +0000 Subject: [PATCH] Standardize imports of github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1 Rename instances of rayiov1alpha1 to rayv1alpha1 Update empty imports of github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1 to be imported as rayv1alpha1 Signed-off-by: Anish Asthana --- .../ray/batchscheduler/interface/interface.go | 10 +- .../ray/batchscheduler/schedulermanager.go | 4 +- .../volcano/volcano_scheduler.go | 16 +-- .../volcano/volcano_scheduler_test.go | 10 +- .../controllers/ray/common/ingress.go | 10 +- .../controllers/ray/common/ingress_test.go | 14 +-- ray-operator/controllers/ray/common/pod.go | 60 +++++----- .../controllers/ray/common/pod_test.go | 104 +++++++++--------- ray-operator/controllers/ray/common/rbac.go | 8 +- .../controllers/ray/common/rbac_test.go | 22 ++-- .../controllers/ray/common/service.go | 26 ++--- .../controllers/ray/common/service_test.go | 10 +- .../controllers/ray/raycluster_controller.go | 84 +++++++------- .../ray/raycluster_controller_fake_test.go | 28 ++--- .../ray/raycluster_controller_test.go | 20 ++-- .../ray/rayjob_controller_suspended_test.go | 20 ++-- .../controllers/ray/rayjob_controller_test.go | 32 +++--- .../ray/rayservice_controller_test.go | 40 +++---- .../ray/rayservice_controller_unit_test.go | 58 +++++----- ray-operator/controllers/ray/suite_test.go | 4 +- ray-operator/controllers/ray/utils/util.go | 22 ++-- .../controllers/ray/utils/util_test.go | 24 ++-- 22 files changed, 313 insertions(+), 313 deletions(-) diff --git a/ray-operator/controllers/ray/batchscheduler/interface/interface.go b/ray-operator/controllers/ray/batchscheduler/interface/interface.go index dedb592bcf2..82345baffdb 100644 --- a/ray-operator/controllers/ray/batchscheduler/interface/interface.go +++ b/ray-operator/controllers/ray/batchscheduler/interface/interface.go @@ -1,7 +1,7 @@ package schedulerinterface import ( - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" @@ -16,11 +16,11 @@ type BatchScheduler interface { // DoBatchSchedulingOnSubmission handles submitting the RayCluster to the batch scheduler on creation / update // For most batch schedulers, this results in the creation of a PodGroup. - DoBatchSchedulingOnSubmission(app *rayiov1alpha1.RayCluster) error + DoBatchSchedulingOnSubmission(app *rayv1alpha1.RayCluster) error // AddMetadataToPod enriches Pod specs with metadata necessary to tie them to the scheduler. // For example, setting labels for queues / priority, and setting schedulerName. 
- AddMetadataToPod(app *rayiov1alpha1.RayCluster, pod *v1.Pod) + AddMetadataToPod(app *rayv1alpha1.RayCluster, pod *v1.Pod) } // BatchSchedulerFactory handles initial setup of the scheduler plugin by registering the @@ -49,11 +49,11 @@ func (d *DefaultBatchScheduler) Name() string { return GetDefaultPluginName() } -func (d *DefaultBatchScheduler) DoBatchSchedulingOnSubmission(app *rayiov1alpha1.RayCluster) error { +func (d *DefaultBatchScheduler) DoBatchSchedulingOnSubmission(app *rayv1alpha1.RayCluster) error { return nil } -func (d *DefaultBatchScheduler) AddMetadataToPod(app *rayiov1alpha1.RayCluster, pod *v1.Pod) { +func (d *DefaultBatchScheduler) AddMetadataToPod(app *rayv1alpha1.RayCluster, pod *v1.Pod) { } func (df *DefaultBatchSchedulerFactory) New(config *rest.Config) (BatchScheduler, error) { diff --git a/ray-operator/controllers/ray/batchscheduler/schedulermanager.go b/ray-operator/controllers/ray/batchscheduler/schedulermanager.go index 2be5c1cba6d..650e4abaefb 100644 --- a/ray-operator/controllers/ray/batchscheduler/schedulermanager.go +++ b/ray-operator/controllers/ray/batchscheduler/schedulermanager.go @@ -8,7 +8,7 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/builder" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" schedulerinterface "github.com/ray-project/kuberay/ray-operator/controllers/ray/batchscheduler/interface" "github.com/ray-project/kuberay/ray-operator/controllers/ray/batchscheduler/volcano" "github.com/ray-project/kuberay/ray-operator/controllers/ray/common" @@ -54,7 +54,7 @@ func NewSchedulerManager(config *rest.Config) *SchedulerManager { return &manager } -func (batch *SchedulerManager) GetSchedulerForCluster(app *rayiov1alpha1.RayCluster) (schedulerinterface.BatchScheduler, error) { +func (batch *SchedulerManager) GetSchedulerForCluster(app *rayv1alpha1.RayCluster) (schedulerinterface.BatchScheduler, error) { if schedulerName, ok := app.ObjectMeta.Labels[common.RaySchedulerName]; ok { return batch.GetScheduler(schedulerName) } diff --git a/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler.go b/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler.go index 76cf88ce36d..70878321fe6 100644 --- a/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler.go +++ b/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler.go @@ -12,7 +12,7 @@ import ( "k8s.io/client-go/rest" "github.com/go-logr/logr" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" @@ -48,7 +48,7 @@ func (v *VolcanoBatchScheduler) Name() string { return GetPluginName() } -func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *rayiov1alpha1.RayCluster) error { +func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *rayv1alpha1.RayCluster) error { var minMember int32 var totalResource corev1.ResourceList if app.Spec.EnableInTreeAutoscaling == nil || !*app.Spec.EnableInTreeAutoscaling { @@ -65,11 +65,11 @@ func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *rayiov1alpha1 return nil } -func (v *VolcanoBatchScheduler) getAppPodGroupName(app *rayiov1alpha1.RayCluster) string { +func (v *VolcanoBatchScheduler) getAppPodGroupName(app 
*rayv1alpha1.RayCluster) string { return fmt.Sprintf("ray-%s-pg", app.Name) } -func (v *VolcanoBatchScheduler) syncPodGroup(app *rayiov1alpha1.RayCluster, size int32, totalResource corev1.ResourceList) error { +func (v *VolcanoBatchScheduler) syncPodGroup(app *rayv1alpha1.RayCluster, size int32, totalResource corev1.ResourceList) error { podGroupName := v.getAppPodGroupName(app) if pg, err := v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Get(context.TODO(), podGroupName, metav1.GetOptions{}); err != nil { if !errors.IsNotFound(err) { @@ -104,7 +104,7 @@ func (v *VolcanoBatchScheduler) syncPodGroup(app *rayiov1alpha1.RayCluster, size } func createPodGroup( - app *rayiov1alpha1.RayCluster, + app *rayv1alpha1.RayCluster, podGroupName string, size int32, totalResource corev1.ResourceList, @@ -114,7 +114,7 @@ func createPodGroup( Namespace: app.Namespace, Name: podGroupName, OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(app, rayiov1alpha1.SchemeGroupVersion.WithKind("RayCluster")), + *metav1.NewControllerRef(app, rayv1alpha1.SchemeGroupVersion.WithKind("RayCluster")), }, }, Spec: v1beta1.PodGroupSpec{ @@ -137,7 +137,7 @@ func createPodGroup( return podGroup } -func (v *VolcanoBatchScheduler) AddMetadataToPod(app *rayiov1alpha1.RayCluster, pod *corev1.Pod) { +func (v *VolcanoBatchScheduler) AddMetadataToPod(app *rayv1alpha1.RayCluster, pod *corev1.Pod) { pod.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) if queue, ok := app.ObjectMeta.Labels[QueueNameLabelKey]; ok { pod.Labels[QueueNameLabelKey] = queue @@ -187,6 +187,6 @@ func (vf *VolcanoBatchSchedulerFactory) ConfigureReconciler(b *builder.Builder) return b. Watches(&source.Kind{Type: &v1beta1.PodGroup{}}, &handler.EnqueueRequestForOwner{ IsController: true, - OwnerType: &rayiov1alpha1.RayCluster{}, + OwnerType: &rayv1alpha1.RayCluster{}, }) } diff --git a/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler_test.go b/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler_test.go index 283a6b9c99b..fa0033a035f 100644 --- a/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler_test.go +++ b/ray-operator/controllers/ray/batchscheduler/volcano/volcano_scheduler_test.go @@ -3,7 +3,7 @@ package volcano import ( "testing" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -52,19 +52,19 @@ func TestCreatePodGroup(t *testing.T) { }, } - cluster := rayiov1alpha1.RayCluster{ + cluster := rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: headSpec, }, Replicas: pointer.Int32Ptr(1), }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Template: corev1.PodTemplateSpec{ Spec: workerSpec, diff --git a/ray-operator/controllers/ray/common/ingress.go b/ray-operator/controllers/ray/common/ingress.go index 1f5c7a8734a..babc343e4c0 100644 --- a/ray-operator/controllers/ray/common/ingress.go +++ b/ray-operator/controllers/ray/common/ingress.go @@ -5,7 +5,7 @@ import ( 
"github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/sirupsen/logrus" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,10 +15,10 @@ const IngressClassAnnotationKey = "kubernetes.io/ingress.class" // BuildIngressForHeadService Builds the ingress for head service dashboard. // This is used to expose dashboard for external traffic. -func BuildIngressForHeadService(cluster rayiov1alpha1.RayCluster) (*networkingv1.Ingress, error) { +func BuildIngressForHeadService(cluster rayv1alpha1.RayCluster) (*networkingv1.Ingress, error) { labels := map[string]string{ RayClusterLabelKey: cluster.Name, - RayIDLabelKey: utils.GenerateIdentifier(cluster.Name, rayiov1alpha1.HeadNode), + RayIDLabelKey: utils.GenerateIdentifier(cluster.Name, rayv1alpha1.HeadNode), KubernetesApplicationNameLabelKey: ApplicationName, KubernetesCreatedByLabelKey: ComponentName, } @@ -94,7 +94,7 @@ func BuildIngressForHeadService(cluster rayiov1alpha1.RayCluster) (*networkingv1 // BuildIngressForRayService Builds the ingress for head service dashboard for RayService. // This is used to expose dashboard for external traffic. // RayService controller updates the ingress whenever a new RayCluster serves the traffic. -func BuildIngressForRayService(service rayiov1alpha1.RayService, cluster rayiov1alpha1.RayCluster) (*networkingv1.Ingress, error) { +func BuildIngressForRayService(service rayv1alpha1.RayService, cluster rayv1alpha1.RayCluster) (*networkingv1.Ingress, error) { ingress, err := BuildIngressForHeadService(cluster) if err != nil { return nil, err @@ -104,7 +104,7 @@ func BuildIngressForRayService(service rayiov1alpha1.RayService, cluster rayiov1 ingress.ObjectMeta.Namespace = service.Namespace ingress.ObjectMeta.Labels = map[string]string{ RayServiceLabelKey: service.Name, - RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(service.Name, rayiov1alpha1.HeadNode)), + RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(service.Name, rayv1alpha1.HeadNode)), } return ingress, nil diff --git a/ray-operator/controllers/ray/common/ingress_test.go b/ray-operator/controllers/ray/common/ingress_test.go index c838e32cab3..6c4d97e82b2 100644 --- a/ray-operator/controllers/ray/common/ingress_test.go +++ b/ray-operator/controllers/ray/common/ingress_test.go @@ -6,7 +6,7 @@ import ( "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/stretchr/testify/assert" @@ -15,7 +15,7 @@ import ( "k8s.io/utils/pointer" ) -var instanceWithIngressEnabled = &rayiov1alpha1.RayCluster{ +var instanceWithIngressEnabled = &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", @@ -23,9 +23,9 @@ var instanceWithIngressEnabled = &rayiov1alpha1.RayCluster{ IngressClassAnnotationKey: "nginx", }, }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.0", - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32Ptr(1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ @@ -43,14 +43,14 @@ var instanceWithIngressEnabled = &rayiov1alpha1.RayCluster{ }, } -var instanceWithIngressEnabledWithoutIngressClass = 
&rayiov1alpha1.RayCluster{ +var instanceWithIngressEnabledWithoutIngressClass = &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.0", - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32Ptr(1), Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ diff --git a/ray-operator/controllers/ray/common/pod.go b/ray-operator/controllers/ray/common/pod.go index 5e153009233..709a298bdfc 100644 --- a/ray-operator/controllers/ray/common/pod.go +++ b/ray-operator/controllers/ray/common/pod.go @@ -9,7 +9,7 @@ import ( "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -52,7 +52,7 @@ func GetHeadPort(headStartParams map[string]string) string { } // rayClusterHAEnabled check if RayCluster enabled FT in annotations -func rayClusterHAEnabled(instance rayiov1alpha1.RayCluster) bool { +func rayClusterHAEnabled(instance rayv1alpha1.RayCluster) bool { if instance.Annotations == nil { return false } @@ -64,7 +64,7 @@ func rayClusterHAEnabled(instance rayiov1alpha1.RayCluster) bool { return false } -func initTemplateAnnotations(instance rayiov1alpha1.RayCluster, podTemplate *v1.PodTemplateSpec) { +func initTemplateAnnotations(instance rayv1alpha1.RayCluster, podTemplate *v1.PodTemplateSpec) { if podTemplate.Annotations == nil { podTemplate.Annotations = make(map[string]string) } @@ -89,7 +89,7 @@ func initTemplateAnnotations(instance rayiov1alpha1.RayCluster, podTemplate *v1. } // DefaultHeadPodTemplate sets the config values -func DefaultHeadPodTemplate(instance rayiov1alpha1.RayCluster, headSpec rayiov1alpha1.HeadGroupSpec, podName string, headPort string) v1.PodTemplateSpec { +func DefaultHeadPodTemplate(instance rayv1alpha1.RayCluster, headSpec rayv1alpha1.HeadGroupSpec, podName string, headPort string) v1.PodTemplateSpec { // TODO (Dmitri) The argument headPort is essentially unused; // headPort is passed into setMissingRayStartParams but unused there for the head pod. // To mitigate this awkwardness and reduce code redundancy, unify head and worker pod configuration logic. 
@@ -103,8 +103,8 @@ func DefaultHeadPodTemplate(instance rayiov1alpha1.RayCluster, headSpec rayiov1a if podTemplate.Labels == nil { podTemplate.Labels = make(map[string]string) } - podTemplate.Labels = labelPod(rayiov1alpha1.HeadNode, instance.Name, "headgroup", instance.Spec.HeadGroupSpec.Template.ObjectMeta.Labels) - headSpec.RayStartParams = setMissingRayStartParams(headSpec.RayStartParams, rayiov1alpha1.HeadNode, headPort, "") + podTemplate.Labels = labelPod(rayv1alpha1.HeadNode, instance.Name, "headgroup", instance.Spec.HeadGroupSpec.Template.ObjectMeta.Labels) + headSpec.RayStartParams = setMissingRayStartParams(headSpec.RayStartParams, rayv1alpha1.HeadNode, headPort, "") headSpec.RayStartParams = setAgentListPortStartParams(instance, headSpec.RayStartParams) initTemplateAnnotations(instance, &podTemplate) @@ -187,7 +187,7 @@ func getEnableInitContainerInjection() bool { } // DefaultWorkerPodTemplate sets the config values -func DefaultWorkerPodTemplate(instance rayiov1alpha1.RayCluster, workerSpec rayiov1alpha1.WorkerGroupSpec, podName string, fqdnRayIP string, headPort string) v1.PodTemplateSpec { +func DefaultWorkerPodTemplate(instance rayv1alpha1.RayCluster, workerSpec rayv1alpha1.WorkerGroupSpec, podName string, fqdnRayIP string, headPort string) v1.PodTemplateSpec { podTemplate := workerSpec.Template podTemplate.GenerateName = podName if podTemplate.ObjectMeta.Namespace == "" { @@ -229,8 +229,8 @@ func DefaultWorkerPodTemplate(instance rayiov1alpha1.RayCluster, workerSpec rayi if podTemplate.Labels == nil { podTemplate.Labels = make(map[string]string) } - podTemplate.Labels = labelPod(rayiov1alpha1.WorkerNode, instance.Name, workerSpec.GroupName, workerSpec.Template.ObjectMeta.Labels) - workerSpec.RayStartParams = setMissingRayStartParams(workerSpec.RayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + podTemplate.Labels = labelPod(rayv1alpha1.WorkerNode, instance.Name, workerSpec.GroupName, workerSpec.Template.ObjectMeta.Labels) + workerSpec.RayStartParams = setMissingRayStartParams(workerSpec.RayStartParams, rayv1alpha1.WorkerNode, headPort, fqdnRayIP) workerSpec.RayStartParams = setAgentListPortStartParams(instance, workerSpec.RayStartParams) initTemplateAnnotations(instance, &podTemplate) @@ -248,10 +248,10 @@ func DefaultWorkerPodTemplate(instance rayiov1alpha1.RayCluster, workerSpec rayi return podTemplate } -func initLivenessProbeHandler(probe *v1.Probe, rayNodeType rayiov1alpha1.RayNodeType) { +func initLivenessProbeHandler(probe *v1.Probe, rayNodeType rayv1alpha1.RayNodeType) { if probe.Exec == nil { // we only create the probe if user did not specify any. - if rayNodeType == rayiov1alpha1.HeadNode { + if rayNodeType == rayv1alpha1.HeadNode { // head node liveness probe cmd := []string{ "bash", "-c", fmt.Sprintf("wget -T 2 -q -O- http://localhost:%d/%s | grep success", @@ -271,10 +271,10 @@ func initLivenessProbeHandler(probe *v1.Probe, rayNodeType rayiov1alpha1.RayNode } } -func initReadinessProbeHandler(probe *v1.Probe, rayNodeType rayiov1alpha1.RayNodeType) { +func initReadinessProbeHandler(probe *v1.Probe, rayNodeType rayv1alpha1.RayNodeType) { if probe.Exec == nil { // we only create the probe if user did not specify any. 
- if rayNodeType == rayiov1alpha1.HeadNode { + if rayNodeType == rayv1alpha1.HeadNode { // head node readiness probe cmd := []string{ "bash", "-c", fmt.Sprintf("wget -T 2 -q -O- http://localhost:%d/%s | grep success", @@ -295,7 +295,7 @@ func initReadinessProbeHandler(probe *v1.Probe, rayNodeType rayiov1alpha1.RayNod } // BuildPod a pod config -func BuildPod(podTemplateSpec v1.PodTemplateSpec, rayNodeType rayiov1alpha1.RayNodeType, rayStartParams map[string]string, headPort string, enableRayAutoscaler *bool, creator string, fqdnRayIP string) (aPod v1.Pod) { +func BuildPod(podTemplateSpec v1.PodTemplateSpec, rayNodeType rayv1alpha1.RayNodeType, rayStartParams map[string]string, headPort string, enableRayAutoscaler *bool, creator string, fqdnRayIP string) (aPod v1.Pod) { pod := v1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -308,7 +308,7 @@ func BuildPod(podTemplateSpec v1.PodTemplateSpec, rayNodeType rayiov1alpha1.RayN // Add /dev/shm volumeMount for the object store to avoid performance degradation. addEmptyDir(&pod.Spec.Containers[rayContainerIndex], &pod, SharedMemoryVolumeName, SharedMemoryVolumeMountPath, v1.StorageMediumMemory) - if rayNodeType == rayiov1alpha1.HeadNode && enableRayAutoscaler != nil && *enableRayAutoscaler { + if rayNodeType == rayv1alpha1.HeadNode && enableRayAutoscaler != nil && *enableRayAutoscaler { // The Ray autoscaler writes logs which are read by the Ray head. // We need a shared log volume to enable this information flow. // Specifically, this is required for the event-logging functionality @@ -453,7 +453,7 @@ func BuildAutoscalerContainer(autoscalerImage string) v1.Container { } // Merge the user overrides from autoscalerOptions into the autoscaler container config. -func mergeAutoscalerOverrides(autoscalerContainer *v1.Container, autoscalerOptions *rayiov1alpha1.AutoscalerOptions) { +func mergeAutoscalerOverrides(autoscalerContainer *v1.Container, autoscalerOptions *rayv1alpha1.AutoscalerOptions) { if autoscalerOptions != nil { if autoscalerOptions.Resources != nil { autoscalerContainer.Resources = *autoscalerOptions.Resources @@ -522,7 +522,7 @@ func getAutoscalerContainerIndex(pod v1.Pod) (autoscalerContainerIndex int) { // labelPod returns the labels for selecting the resources // belonging to the given RayCluster CR name. 
-func labelPod(rayNodeType rayiov1alpha1.RayNodeType, rayClusterName string, groupName string, labels map[string]string) (ret map[string]string) { +func labelPod(rayNodeType rayv1alpha1.RayNodeType, rayClusterName string, groupName string, labels map[string]string) (ret map[string]string) { if labels == nil { labels = make(map[string]string) } @@ -541,7 +541,7 @@ func labelPod(rayNodeType rayiov1alpha1.RayNodeType, rayClusterName string, grou for k, v := range ret { if k == string(rayNodeType) { // overriding invalid values for this label - if v != string(rayiov1alpha1.HeadNode) && v != string(rayiov1alpha1.WorkerNode) { + if v != string(rayv1alpha1.HeadNode) && v != string(rayv1alpha1.WorkerNode) { labels[k] = v } } @@ -573,7 +573,7 @@ func setInitContainerEnvVars(container *v1.Container, fqdnRayIP string) { ) } -func setContainerEnvVars(pod *v1.Pod, rayContainerIndex int, rayNodeType rayiov1alpha1.RayNodeType, rayStartParams map[string]string, fqdnRayIP string, headPort string, creator string) { +func setContainerEnvVars(pod *v1.Pod, rayContainerIndex int, rayNodeType rayv1alpha1.RayNodeType, rayStartParams map[string]string, fqdnRayIP string, headPort string, creator string) { // TODO: Audit all environment variables to identify which should not be modified by users. container := &pod.Spec.Containers[rayContainerIndex] if container.Env == nil || len(container.Env) == 0 { @@ -583,7 +583,7 @@ func setContainerEnvVars(pod *v1.Pod, rayContainerIndex int, rayNodeType rayiov1 // case 1: head => Use LOCAL_HOST // case 2: worker => Use fqdnRayIP (fully qualified domain name) ip := LOCAL_HOST - if rayNodeType == rayiov1alpha1.WorkerNode { + if rayNodeType == rayv1alpha1.WorkerNode { ip = fqdnRayIP container.Env = append(container.Env, v1.EnvVar{Name: FQ_RAY_IP, Value: ip}, @@ -655,7 +655,7 @@ func setContainerEnvVars(pod *v1.Pod, rayContainerIndex int, rayNodeType rayiov1 } } } - if !envVarExists(RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, container.Env) && rayNodeType == rayiov1alpha1.WorkerNode { + if !envVarExists(RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, container.Env) && rayNodeType == rayv1alpha1.WorkerNode { // If GCS FT is enabled and RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S is not set, set the worker's // RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S to 600s. If the worker cannot reconnect to GCS within // 600s, the Raylet will exit the process. By default, the value is 60s, so the head node will @@ -678,16 +678,16 @@ func envVarExists(envName string, envVars []v1.EnvVar) bool { } // TODO auto complete params -func setMissingRayStartParams(rayStartParams map[string]string, nodeType rayiov1alpha1.RayNodeType, headPort string, fqdnRayIP string) (completeStartParams map[string]string) { - // Note: The argument headPort is unused for nodeType == rayiov1alpha1.HeadNode. - if nodeType == rayiov1alpha1.WorkerNode { +func setMissingRayStartParams(rayStartParams map[string]string, nodeType rayv1alpha1.RayNodeType, headPort string, fqdnRayIP string) (completeStartParams map[string]string) { + // Note: The argument headPort is unused for nodeType == rayv1alpha1.HeadNode. + if nodeType == rayv1alpha1.WorkerNode { if _, ok := rayStartParams["address"]; !ok { address := fmt.Sprintf("%s:%s", fqdnRayIP, headPort) rayStartParams["address"] = address } } - if nodeType == rayiov1alpha1.HeadNode { + if nodeType == rayv1alpha1.HeadNode { // Allow incoming connections from all network interfaces for the dashboard by default. 
// The default value of `dashboard-host` is `localhost` which is not accessible from outside the head Pod. if _, ok := rayStartParams["dashboard-host"]; !ok { @@ -717,7 +717,7 @@ func setMissingRayStartParams(rayStartParams map[string]string, nodeType rayiov1 return rayStartParams } -func setAgentListPortStartParams(instance rayiov1alpha1.RayCluster, rayStartParams map[string]string) (completeStartParams map[string]string) { +func setAgentListPortStartParams(instance rayv1alpha1.RayCluster, rayStartParams map[string]string) (completeStartParams map[string]string) { // add dashboard listen port for serve endpoints to RayService. if _, ok := rayStartParams["dashboard-agent-listen-port"]; !ok { if value, ok := instance.Annotations[EnableAgentServiceKey]; ok && value == EnableAgentServiceTrue { @@ -729,7 +729,7 @@ func setAgentListPortStartParams(instance rayiov1alpha1.RayCluster, rayStartPara } // concatenateContainerCommand with ray start -func concatenateContainerCommand(nodeType rayiov1alpha1.RayNodeType, rayStartParams map[string]string, resource v1.ResourceRequirements) (fullCmd string) { +func concatenateContainerCommand(nodeType rayv1alpha1.RayNodeType, rayStartParams map[string]string, resource v1.ResourceRequirements) (fullCmd string) { if _, ok := rayStartParams["num-cpus"]; !ok { cpu := resource.Limits[v1.ResourceCPU] if !cpu.IsZero() { @@ -758,9 +758,9 @@ func concatenateContainerCommand(nodeType rayiov1alpha1.RayNodeType, rayStartPar log.V(10).Info("concatenate container command", "ray start params", rayStartParams) switch nodeType { - case rayiov1alpha1.HeadNode: + case rayv1alpha1.HeadNode: return fmt.Sprintf("ulimit -n 65536; ray start --head %s", convertParamMap(rayStartParams)) - case rayiov1alpha1.WorkerNode: + case rayv1alpha1.WorkerNode: return fmt.Sprintf("ulimit -n 65536; ray start %s", convertParamMap(rayStartParams)) default: log.Error(fmt.Errorf("missing node type"), "a node must be either head or worker") @@ -889,7 +889,7 @@ func findMemoryReqOrLimit(container v1.Container) (res *resource.Quantity) { // Return a bool indicating the validity of RayStartParams and an err with additional information. // If isValid is true, RayStartParams are valid. Any errors will only affect performance. // If isValid is false, RayStartParams are invalid will result in an unhealthy or failed Ray cluster. -func ValidateHeadRayStartParams(rayHeadGroupSpec rayiov1alpha1.HeadGroupSpec) (isValid bool, err error) { +func ValidateHeadRayStartParams(rayHeadGroupSpec rayv1alpha1.HeadGroupSpec) (isValid bool, err error) { // TODO (dxia): if you add more validation, please split checks into separate subroutines. 
var objectStoreMemory int64 rayStartParams := rayHeadGroupSpec.RayStartParams diff --git a/ray-operator/controllers/ray/common/pod_test.go b/ray-operator/controllers/ray/common/pod_test.go index 5ca7ed8d3b1..d542b3ebbe0 100644 --- a/ray-operator/controllers/ray/common/pod_test.go +++ b/ray-operator/controllers/ray/common/pod_test.go @@ -15,21 +15,21 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" ) var testMemoryLimit = resource.MustParse("1Gi") -var instance = rayiov1alpha1.RayCluster{ +var instance = rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "2.0.0", - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32Ptr(1), RayStartParams: map[string]string{ "port": "6379", @@ -82,7 +82,7 @@ var instance = rayiov1alpha1.RayCluster{ }, }, }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32Ptr(3), MinReplicas: pointer.Int32Ptr(0), @@ -350,9 +350,9 @@ func TestBuildPod(t *testing.T) { cluster := instance.DeepCopy() // Test head pod - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") + pod := BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") // Check environment variables rayContainer := pod.Spec.Containers[getRayContainerIndex(pod.Spec)] @@ -373,7 +373,7 @@ func TestBuildPod(t *testing.T) { t.Fatalf("Expected `%v` but got `%v`", expectedResult, actualResult) } actualResult = pod.Labels[RayNodeTypeLabelKey] - expectedResult = string(rayiov1alpha1.HeadNode) + expectedResult = string(rayv1alpha1.HeadNode) if !reflect.DeepEqual(expectedResult, actualResult) { t.Fatalf("Expected `%v` but got `%v`", expectedResult, actualResult) } @@ -399,10 +399,10 @@ func TestBuildPod(t *testing.T) { // testing worker pod worker := cluster.Spec.WorkerGroupSpecs[0] - podName = cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName = cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) podTemplateSpec = DefaultWorkerPodTemplate(*cluster, worker, podName, fqdnRayIP, "6379") - pod = BuildPod(podTemplateSpec, rayiov1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) + pod = BuildPod(podTemplateSpec, rayv1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) // Check environment variables rayContainer = pod.Spec.Containers[getRayContainerIndex(pod.Spec)] @@ -425,9 +425,9 @@ func
TestBuildPod(t *testing.T) { func TestBuildPod_WithAutoscalerEnabled(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.EnableInTreeAutoscaling = &trueFlag - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "") + pod := BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "") actualResult := pod.Labels[RayClusterLabelKey] expectedResult := cluster.Name @@ -435,7 +435,7 @@ func TestBuildPod_WithAutoscalerEnabled(t *testing.T) { t.Fatalf("Expected `%v` but got `%v`", expectedResult, actualResult) } actualResult = pod.Labels[RayNodeTypeLabelKey] - expectedResult = string(rayiov1alpha1.HeadNode) + expectedResult = string(rayv1alpha1.HeadNode) if !reflect.DeepEqual(expectedResult, actualResult) { t.Fatalf("Expected `%v` but got `%v`", expectedResult, actualResult) } @@ -480,9 +480,9 @@ func TestBuildPod_WithAutoscalerEnabled(t *testing.T) { func TestBuildPod_WithCreatedByRayService(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.EnableInTreeAutoscaling = &trueFlag - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, RayServiceCreatorLabelValue, "") + pod := BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, RayServiceCreatorLabelValue, "") hasCorrectDeathEnv := false for _, container := range pod.Spec.Containers { @@ -511,9 +511,9 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) { } // Build a head Pod. 
- podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") + pod := BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") // Check environment variable "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S" rayContainerIndex := getRayContainerIndex(pod.Spec) @@ -532,7 +532,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) { cluster.Spec.HeadGroupSpec.Template.Spec.Containers[rayContainerIndex].Env = append(cluster.Spec.HeadGroupSpec.Template.Spec.Containers[rayContainerIndex].Env, v1.EnvVar{Name: RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, Value: "60"}) podTemplateSpec = DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod = BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") + pod = BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") rayContainer = pod.Spec.Containers[rayContainerIndex] // Check environment variable "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S" @@ -546,10 +546,10 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) { // Build a worker pod worker := cluster.Spec.WorkerGroupSpecs[0] - podName = cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName = cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) podTemplateSpec = DefaultWorkerPodTemplate(*cluster, worker, podName, fqdnRayIP, "6379") - pod = BuildPod(podTemplateSpec, rayiov1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) + pod = BuildPod(podTemplateSpec, rayv1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) // Check the default value of "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S" rayContainer = pod.Spec.Containers[rayContainerIndex] @@ -566,7 +566,7 @@ func TestBuildPod_WithGcsFtEnabled(t *testing.T) { v1.EnvVar{Name: RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S, Value: "120"}) worker = cluster.Spec.WorkerGroupSpecs[0] podTemplateSpec = DefaultWorkerPodTemplate(*cluster, worker, podName, fqdnRayIP, "6379") - pod = BuildPod(podTemplateSpec, rayiov1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) + pod = BuildPod(podTemplateSpec, rayv1alpha1.WorkerNode, worker.RayStartParams, "6379", nil, "", fqdnRayIP) // Check the default value of "RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S" rayContainer = pod.Spec.Containers[rayContainerIndex] @@ -577,12 +577,12 @@ func TestBuildPodWithAutoscalerOptions(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.EnableInTreeAutoscaling = &trueFlag - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) customAutoscalerImage := 
"custom-autoscaler-xxx" customPullPolicy := v1.PullIfNotPresent customTimeout := int32(100) - customUpscaling := rayiov1alpha1.UpscalingMode("Aggressive") + customUpscaling := ray747213v1alpha1.UpscalingMode("Aggressive") customResources := v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1"), @@ -612,7 +612,7 @@ func TestBuildPodWithAutoscalerOptions(t *testing.T) { SeccompProfile: &seccompProfile, } - cluster.Spec.AutoscalerOptions = &rayiov1alpha1.AutoscalerOptions{ + cluster.Spec.AutoscalerOptions = &ray747213v1alpha1.AutoscalerOptions{ UpscalingMode: &customUpscaling, IdleTimeoutSeconds: &customTimeout, Image: &customAutoscalerImage, @@ -623,7 +623,7 @@ func TestBuildPodWithAutoscalerOptions(t *testing.T) { SecurityContext: &customSecurityContext, } podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "") + pod := BuildPod(podTemplateSpec, ray747213v1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", &trueFlag, "", "") expectedContainer := *autoscalerContainer.DeepCopy() expectedContainer.Image = customAutoscalerImage expectedContainer.ImagePullPolicy = customPullPolicy @@ -641,7 +641,7 @@ func TestBuildPodWithAutoscalerOptions(t *testing.T) { func TestHeadPodTemplate_WithAutoscalingEnabled(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.EnableInTreeAutoscaling = &trueFlag - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(ray747213v1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") // autoscaler container is injected into head pod @@ -672,7 +672,7 @@ func TestHeadPodTemplate_WithAutoscalingEnabled(t *testing.T) { // the head pod's service account should be an empty string. 
func TestHeadPodTemplate_WithNoServiceAccount(t *testing.T) { cluster := instance.DeepCopy() - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) pod := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") actualResult := pod.Spec.ServiceAccountName @@ -688,7 +688,7 @@ func TestHeadPodTemplate_WithServiceAccountNoAutoscaling(t *testing.T) { cluster := instance.DeepCopy() serviceAccount := "head-service-account" cluster.Spec.HeadGroupSpec.Template.Spec.ServiceAccountName = serviceAccount - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) pod := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") actualResult := pod.Spec.ServiceAccountName @@ -705,7 +705,7 @@ func TestHeadPodTemplate_WithServiceAccount(t *testing.T) { serviceAccount := "head-service-account" cluster.Spec.HeadGroupSpec.Template.Spec.ServiceAccountName = serviceAccount cluster.Spec.EnableInTreeAutoscaling = &trueFlag - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) pod := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") actualResult := pod.Spec.ServiceAccountName @@ -761,9 +761,9 @@ func TestCleanupInvalidVolumeMounts(t *testing.T) { cluster := instance.DeepCopy() // Test head pod - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") - pod := BuildPod(podTemplateSpec, rayiov1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") + pod := BuildPod(podTemplateSpec, rayv1alpha1.HeadNode, cluster.Spec.HeadGroupSpec.RayStartParams, "6379", nil, "", "") pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, []v1.VolumeMount{ { @@ -787,7 +787,7 @@ func TestDefaultWorkerPodTemplateWithName(t *testing.T) { fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) worker := cluster.Spec.WorkerGroupSpecs[0] worker.Template.ObjectMeta.Name = "ray-worker-test" - podName := cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName := cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) expectedWorker := *worker.DeepCopy() // Pass a deep copy of worker (*worker.DeepCopy()) to prevent "worker" from updating. 
@@ -811,7 +811,7 @@ func containerPortExists(ports []v1.ContainerPort, name string, containerPort in func TestDefaultHeadPodTemplateWithConfigurablePorts(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.HeadGroupSpec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{} - podName := strings.ToLower(cluster.Name + DashSymbol + string(rayiov1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) + podName := strings.ToLower(cluster.Name + DashSymbol + string(rayv1alpha1.HeadNode) + DashSymbol + utils.FormatInt32(0)) podTemplateSpec := DefaultHeadPodTemplate(*cluster, cluster.Spec.HeadGroupSpec, podName, "6379") // DefaultHeadPodTemplate will add the default metrics port if user doesn't specify it. // Verify the default metrics port exists. @@ -835,7 +835,7 @@ func TestDefaultWorkerPodTemplateWithConfigurablePorts(t *testing.T) { cluster := instance.DeepCopy() cluster.Spec.WorkerGroupSpecs[0].Template.Spec.Containers[0].Ports = []v1.ContainerPort{} worker := cluster.Spec.WorkerGroupSpecs[0] - podName := cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName := cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) podTemplateSpec := DefaultWorkerPodTemplate(*cluster, worker, podName, fqdnRayIP, "6379") // DefaultWorkerPodTemplate will add the default metrics port if user doesn't specify it. @@ -861,7 +861,7 @@ func TestDefaultInitContainer(t *testing.T) { cluster := instance.DeepCopy() fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) worker := cluster.Spec.WorkerGroupSpecs[0] - podName := cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName := cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) expectedResult := len(cluster.Spec.WorkerGroupSpecs[0].Template.Spec.InitContainers) + 1 // Pass a deep copy of worker (*worker.DeepCopy()) to prevent "worker" from updating. @@ -895,7 +895,7 @@ func TestDefaultInitContainerImagePullPolicy(t *testing.T) { cluster := instance.DeepCopy() fqdnRayIP := utils.GenerateFQDNServiceName(cluster.Name, cluster.Namespace) worker := cluster.Spec.WorkerGroupSpecs[0] - podName := cluster.Name + DashSymbol + string(rayiov1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) + podName := cluster.Name + DashSymbol + string(rayv1alpha1.WorkerNode) + DashSymbol + worker.GroupName + DashSymbol + utils.FormatInt32(0) cases := []struct { name string @@ -944,23 +944,23 @@ func TestSetMissingRayStartParamsAddress(t *testing.T) { // Case 1: Head node with no address option set. rayStartParams := map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, rayv1alpha1.HeadNode, headPort, "") assert.NotContains(t, rayStartParams, "address", "Head node should not have an address option set by default.") // Case 2: Head node with custom address option set. 
rayStartParams = map[string]string{"address": customAddress} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, customAddress, rayStartParams["address"], fmt.Sprintf("Expected `%v` but got `%v`", customAddress, rayStartParams["address"])) // Case 3: Worker node with no address option set. rayStartParams = map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) expectedAddress := fmt.Sprintf("%s:%s", fqdnRayIP, headPort) assert.Equal(t, expectedAddress, rayStartParams["address"], fmt.Sprintf("Expected `%v` but got `%v`", expectedAddress, rayStartParams["address"])) // Case 4: Worker node with custom address option set. rayStartParams = map[string]string{"address": customAddress} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, customAddress, rayStartParams["address"], fmt.Sprintf("Expected `%v` but got `%v`", customAddress, rayStartParams["address"])) } @@ -975,22 +975,22 @@ func TestSetMissingRayStartParamsMetricsExportPort(t *testing.T) { // Case 1: Head node with no metrics-export-port option set. rayStartParams := map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, fmt.Sprint(DefaultMetricsPort), rayStartParams["metrics-export-port"], fmt.Sprintf("Expected `%v` but got `%v`", fmt.Sprint(DefaultMetricsPort), rayStartParams["metrics-export-port"])) // Case 2: Head node with custom metrics-export-port option set. rayStartParams = map[string]string{"metrics-export-port": fmt.Sprint(customMetricsPort)} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, fmt.Sprint(customMetricsPort), rayStartParams["metrics-export-port"], fmt.Sprintf("Expected `%v` but got `%v`", fmt.Sprint(customMetricsPort), rayStartParams["metrics-export-port"])) // Case 3: Worker node with no metrics-export-port option set. rayStartParams = map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, fmt.Sprint(DefaultMetricsPort), rayStartParams["metrics-export-port"], fmt.Sprintf("Expected `%v` but got `%v`", fmt.Sprint(DefaultMetricsPort), rayStartParams["metrics-export-port"])) // Case 4: Worker node with custom metrics-export-port option set. 
rayStartParams = map[string]string{"metrics-export-port": fmt.Sprint(customMetricsPort)} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, fmt.Sprint(customMetricsPort), rayStartParams["metrics-export-port"], fmt.Sprintf("Expected `%v` but got `%v`", fmt.Sprint(customMetricsPort), rayStartParams["metrics-export-port"])) } @@ -1004,22 +1004,22 @@ func TestSetMissingRayStartParamsBlock(t *testing.T) { // Case 1: Head node with no --block option set. rayStartParams := map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, "true", rayStartParams["block"], fmt.Sprintf("Expected `%v` but got `%v`", "true", rayStartParams["block"])) // Case 2: Head node with --block option set to false. rayStartParams = map[string]string{"block": "false"} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, "false", rayStartParams["block"], fmt.Sprintf("Expected `%v` but got `%v`", "false", rayStartParams["block"])) // Case 3: Worker node with no --block option set. rayStartParams = map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, "true", rayStartParams["block"], fmt.Sprintf("Expected `%v` but got `%v`", "true", rayStartParams["block"])) // Case 4: Worker node with --block option set to false. rayStartParams = map[string]string{"block": "false"} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, "false", rayStartParams["block"], fmt.Sprintf("Expected `%v` but got `%v`", "false", rayStartParams["block"])) } @@ -1031,23 +1031,23 @@ func TestSetMissingRayStartParamsDashboardHost(t *testing.T) { // Case 1: Head node with no dashboard-host option set. rayStartParams := map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, "0.0.0.0", rayStartParams["dashboard-host"], fmt.Sprintf("Expected `%v` but got `%v`", "0.0.0.0", rayStartParams["dashboard-host"])) // Case 2: Head node with dashboard-host option set. rayStartParams = map[string]string{"dashboard-host": "localhost"} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.HeadNode, headPort, "") + rayStartParams = setMissingRayStartParams(rayStartParams, ray747213v1alpha1.HeadNode, headPort, "") assert.Equal(t, "localhost", rayStartParams["dashboard-host"], fmt.Sprintf("Expected `%v` but got `%v`", "localhost", rayStartParams["dashboard-host"])) // Case 3: Worker node with no dashboard-host option set. 
rayStartParams = map[string]string{} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, rayv1alpha1.WorkerNode, headPort, fqdnRayIP) assert.NotContains(t, rayStartParams, "dashboard-host", "workers should not have an dashboard-host option set.") // Case 4: Worker node with dashboard-host option set. // To maximize user empowerment, this option can be enabled. However, it is important to note that the dashboard is not available on worker nodes. rayStartParams = map[string]string{"dashboard-host": "localhost"} - rayStartParams = setMissingRayStartParams(rayStartParams, rayiov1alpha1.WorkerNode, headPort, fqdnRayIP) + rayStartParams = setMissingRayStartParams(rayStartParams, rayv1alpha1.WorkerNode, headPort, fqdnRayIP) assert.Equal(t, "localhost", rayStartParams["dashboard-host"], fmt.Sprintf("Expected `%v` but got `%v`", "localhost", rayStartParams["dashboard-host"])) } diff --git a/ray-operator/controllers/ray/common/rbac.go b/ray-operator/controllers/ray/common/rbac.go index cbeb1fd0b2f..b36458f4426 100644 --- a/ray-operator/controllers/ray/common/rbac.go +++ b/ray-operator/controllers/ray/common/rbac.go @@ -1,7 +1,7 @@ package common import ( - "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -9,7 +9,7 @@ import ( ) // BuildServiceAccount creates a new ServiceAccount for a head pod with autoscaler. -func BuildServiceAccount(cluster *v1alpha1.RayCluster) (*v1.ServiceAccount, error) { +func BuildServiceAccount(cluster *rayv1alpha1.RayCluster) (*v1.ServiceAccount, error) { sa := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: utils.GetHeadGroupServiceAccountName(cluster), @@ -26,7 +26,7 @@ func BuildServiceAccount(cluster *v1alpha1.RayCluster) (*v1.ServiceAccount, erro } // BuildRole creates a new Role for an RayCluster resource. -func BuildRole(cluster *v1alpha1.RayCluster) (*rbacv1.Role, error) { +func BuildRole(cluster *rayv1alpha1.RayCluster) (*rbacv1.Role, error) { role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: cluster.Name, @@ -55,7 +55,7 @@ func BuildRole(cluster *v1alpha1.RayCluster) (*rbacv1.Role, error) { } // BuildRole -func BuildRoleBinding(cluster *v1alpha1.RayCluster) (*rbacv1.RoleBinding, error) { +func BuildRoleBinding(cluster *rayv1alpha1.RayCluster) (*rbacv1.RoleBinding, error) { serviceAccountName := utils.GetHeadGroupServiceAccountName(cluster) rb := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ diff --git a/ray-operator/controllers/ray/common/rbac_test.go b/ray-operator/controllers/ray/common/rbac_test.go index f51184f05fe..b16e60d3bf8 100644 --- a/ray-operator/controllers/ray/common/rbac_test.go +++ b/ray-operator/controllers/ray/common/rbac_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/stretchr/testify/assert" @@ -15,17 +15,17 @@ import ( // Test subject and role ref names in the function BuildRoleBinding. 
func TestBuildRoleBindingSubjectAndRoleRefName(t *testing.T) { tests := map[string]struct { - input *rayiov1alpha1.RayCluster + input *rayv1alpha1.RayCluster want []string }{ "Ray cluster with head group service account": { - input: &rayiov1alpha1.RayCluster{ + input: &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ ServiceAccountName: "my-service-account", @@ -37,13 +37,13 @@ func TestBuildRoleBindingSubjectAndRoleRefName(t *testing.T) { want: []string{"my-service-account", "raycluster-sample"}, }, "Ray cluster without head group service account": { - input: &rayiov1alpha1.RayCluster{ + input: &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{}, }, @@ -53,13 +53,13 @@ func TestBuildRoleBindingSubjectAndRoleRefName(t *testing.T) { want: []string{"raycluster-sample", "raycluster-sample"}, }, "Ray cluster with a long name and without head group service account": { - input: &rayiov1alpha1.RayCluster{ + input: &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: longString(t), // 200 chars long Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{}, }, diff --git a/ray-operator/controllers/ray/common/service.go b/ray-operator/controllers/ray/common/service.go index d8db2e7f0c8..87da483fd05 100644 --- a/ray-operator/controllers/ray/common/service.go +++ b/ray-operator/controllers/ray/common/service.go @@ -7,16 +7,16 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" ) // HeadServiceLabels returns the default labels for a cluster's head service. -func HeadServiceLabels(cluster rayiov1alpha1.RayCluster) map[string]string { +func HeadServiceLabels(cluster rayv1alpha1.RayCluster) map[string]string { return map[string]string{ RayClusterLabelKey: cluster.Name, - RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode), - RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(cluster.Name, rayiov1alpha1.HeadNode)), + RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), + RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(cluster.Name, rayv1alpha1.HeadNode)), KubernetesApplicationNameLabelKey: ApplicationName, KubernetesCreatedByLabelKey: ComponentName, } @@ -24,7 +24,7 @@ func HeadServiceLabels(cluster rayiov1alpha1.RayCluster) map[string]string { // BuildServiceForHeadPod Builds the service for a pod. Currently, there is only one service that allows // the worker nodes to connect to the head node. 
-func BuildServiceForHeadPod(cluster rayiov1alpha1.RayCluster, labels map[string]string, annotations map[string]string) (*corev1.Service, error) { +func BuildServiceForHeadPod(cluster rayv1alpha1.RayCluster, labels map[string]string, annotations map[string]string) (*corev1.Service, error) { if labels == nil { labels = make(map[string]string) } @@ -156,7 +156,7 @@ func BuildServiceForHeadPod(cluster rayiov1alpha1.RayCluster, labels map[string] // BuildHeadServiceForRayService Builds the service for a pod. Currently, there is only one service that allows // the worker nodes to connect to the head node. // RayService controller updates the service whenever a new RayCluster serves the traffic. -func BuildHeadServiceForRayService(rayService rayiov1alpha1.RayService, rayCluster rayiov1alpha1.RayCluster) (*corev1.Service, error) { +func BuildHeadServiceForRayService(rayService rayv1alpha1.RayService, rayCluster rayv1alpha1.RayCluster) (*corev1.Service, error) { service, err := BuildServiceForHeadPod(rayCluster, nil, nil) if err != nil { return nil, err @@ -166,15 +166,15 @@ func BuildHeadServiceForRayService(rayService rayiov1alpha1.RayService, rayClust service.ObjectMeta.Namespace = rayService.Namespace service.ObjectMeta.Labels = map[string]string{ RayServiceLabelKey: rayService.Name, - RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode), - RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(rayService.Name, rayiov1alpha1.HeadNode)), + RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), + RayIDLabelKey: utils.CheckLabel(utils.GenerateIdentifier(rayService.Name, rayv1alpha1.HeadNode)), } return service, nil } // BuildServeServiceForRayService builds the service for head node and worker nodes who have healthy http proxy to serve traffics. -func BuildServeServiceForRayService(rayService rayiov1alpha1.RayService, rayCluster rayiov1alpha1.RayCluster) (*corev1.Service, error) { +func BuildServeServiceForRayService(rayService rayv1alpha1.RayService, rayCluster rayv1alpha1.RayCluster) (*corev1.Service, error) { labels := map[string]string{ RayServiceLabelKey: rayService.Name, RayClusterServingServiceLabelKey: utils.GenerateServeServiceLabel(rayService.Name), @@ -210,7 +210,7 @@ func BuildServeServiceForRayService(rayService rayiov1alpha1.RayService, rayClus } // BuildDashboardService Builds the service for dashboard agent and head node. -func BuildDashboardService(cluster rayiov1alpha1.RayCluster) (*corev1.Service, error) { +func BuildDashboardService(cluster rayv1alpha1.RayCluster) (*corev1.Service, error) { labels := map[string]string{ RayClusterDashboardServiceLabelKey: utils.GenerateDashboardAgentLabel(cluster.Name), } @@ -244,7 +244,7 @@ func BuildDashboardService(cluster rayiov1alpha1.RayCluster) (*corev1.Service, e } // getServicePorts will either user passing ports or default ports to create service. -func getServicePorts(cluster rayiov1alpha1.RayCluster) map[string]int32 { +func getServicePorts(cluster rayv1alpha1.RayCluster) map[string]int32 { ports, err := getPortsFromCluster(cluster) // Assign default ports if err != nil || len(ports) == 0 { @@ -271,7 +271,7 @@ func getServicePorts(cluster rayiov1alpha1.RayCluster) map[string]int32 { // getPortsFromCluster get the ports from head container and directly map them in service // It's user's responsibility to maintain rayStartParam ports and container ports mapping // TODO: Consider to infer ports from rayStartParams (source of truth) in the future. 
-func getPortsFromCluster(cluster rayiov1alpha1.RayCluster) (map[string]int32, error) { +func getPortsFromCluster(cluster rayv1alpha1.RayCluster) (map[string]int32, error) { svcPorts := map[string]int32{} index := utils.FindRayContainerIndex(cluster.Spec.HeadGroupSpec.Template.Spec) @@ -297,7 +297,7 @@ func getDefaultPorts() map[string]int32 { } // IsAgentServiceEnabled check if the agent service is enabled for RayCluster. -func IsAgentServiceEnabled(instance *rayiov1alpha1.RayCluster) bool { +func IsAgentServiceEnabled(instance *rayv1alpha1.RayCluster) bool { enableAgentServiceValue, exist := instance.Annotations[EnableAgentServiceKey] if exist && enableAgentServiceValue == EnableAgentServiceTrue { return true diff --git a/ray-operator/controllers/ray/common/service_test.go b/ray-operator/controllers/ray/common/service_test.go index 96667b14b2c..cf30f3a753e 100644 --- a/ray-operator/controllers/ray/common/service_test.go +++ b/ray-operator/controllers/ray/common/service_test.go @@ -6,7 +6,7 @@ import ( "reflect" "testing" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" "github.com/stretchr/testify/assert" @@ -21,18 +21,18 @@ var ( headServiceAnnotationValue1 = "HeadServiceAnnotationValue1" headServiceAnnotationKey2 = "HeadServiceAnnotationKey2" headServiceAnnotationValue2 = "HeadServiceAnnotationValue2" - instanceWithWrongSvc = &rayiov1alpha1.RayCluster{ + instanceWithWrongSvc = &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.0", HeadServiceAnnotations: map[string]string{ headServiceAnnotationKey1: headServiceAnnotationValue1, headServiceAnnotationKey2: headServiceAnnotationValue2, }, - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32Ptr(1), RayStartParams: map[string]string{ "port": "6379", @@ -94,7 +94,7 @@ func TestBuildServiceForHeadPod(t *testing.T) { } actualResult = svc.Spec.Selector[RayNodeTypeLabelKey] - expectedResult = string(rayiov1alpha1.HeadNode) + expectedResult = string(rayv1alpha1.HeadNode) if !reflect.DeepEqual(expectedResult, actualResult) { t.Fatalf("Expected `%v` but got `%v`", expectedResult, actualResult) } diff --git a/ray-operator/controllers/ray/raycluster_controller.go b/ray-operator/controllers/ray/raycluster_controller.go index 53ddb6d551b..6cc8494443d 100644 --- a/ray-operator/controllers/ray/raycluster_controller.go +++ b/ray-operator/controllers/ray/raycluster_controller.go @@ -18,7 +18,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "k8s.io/client-go/tools/record" "github.com/go-logr/logr" @@ -111,7 +111,7 @@ func (r *RayClusterReconciler) Reconcile(ctx context.Context, request ctrl.Reque } // Try to fetch the RayCluster instance - instance := &rayiov1alpha1.RayCluster{} + instance := &rayv1alpha1.RayCluster{} if err = r.Get(context.TODO(), request.NamespacedName, instance); err == nil { return r.rayClusterReconcile(request, instance) } @@ -197,7 +197,7 @@ func (r *RayClusterReconciler) eventReconcile(request ctrl.Request, event *corev return ctrl.Result{}, nil } -func (r *RayClusterReconciler) rayClusterReconcile(request 
ctrl.Request, instance *rayiov1alpha1.RayCluster) (ctrl.Result, error) { +func (r *RayClusterReconciler) rayClusterReconcile(request ctrl.Request, instance *rayv1alpha1.RayCluster) (ctrl.Result, error) { _ = r.Log.WithValues("raycluster", request.NamespacedName) r.Log.Info("reconciling RayCluster", "cluster name", request.Name) @@ -206,31 +206,31 @@ func (r *RayClusterReconciler) rayClusterReconcile(request ctrl.Request, instanc return ctrl.Result{}, nil } if err := r.reconcileAutoscalerServiceAccount(instance); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } if err := r.reconcileAutoscalerRole(instance); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } if err := r.reconcileAutoscalerRoleBinding(instance); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } if err := r.reconcileIngress(instance); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } if err := r.reconcileServices(instance, common.HeadService); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err @@ -238,20 +238,20 @@ func (r *RayClusterReconciler) rayClusterReconcile(request ctrl.Request, instanc if common.IsAgentServiceEnabled(instance) { // Reconcile agent service only when enabled in annotation. 
if err := r.reconcileServices(instance, common.AgentService); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } } if err := r.reconcilePods(instance); err != nil { - if updateErr := r.updateClusterState(instance, rayiov1alpha1.Failed); updateErr != nil { + if updateErr := r.updateClusterState(instance, rayv1alpha1.Failed); updateErr != nil { r.Log.Error(updateErr, "RayCluster update state error", "cluster name", request.Name) } if updateErr := r.updateClusterReason(instance, err.Error()); updateErr != nil { r.Log.Error(updateErr, "RayCluster update reason error", "cluster name", request.Name) } - r.Recorder.Event(instance, corev1.EventTypeWarning, string(rayiov1alpha1.PodReconciliationError), err.Error()) + r.Recorder.Event(instance, corev1.EventTypeWarning, string(rayv1alpha1.PodReconciliationError), err.Error()) return ctrl.Result{RequeueAfter: DefaultRequeueDuration}, err } // update the status if needed @@ -276,7 +276,7 @@ func (r *RayClusterReconciler) rayClusterReconcile(request ctrl.Request, instanc return ctrl.Result{RequeueAfter: time.Duration(requeueAfterSeconds) * time.Second}, nil } -func (r *RayClusterReconciler) reconcileIngress(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) reconcileIngress(instance *rayv1alpha1.RayCluster) error { if instance.Spec.HeadGroupSpec.EnableIngress == nil || !*instance.Spec.HeadGroupSpec.EnableIngress { return nil } @@ -311,7 +311,7 @@ func (r *RayClusterReconciler) reconcileIngress(instance *rayiov1alpha1.RayClust return nil } -func (r *RayClusterReconciler) reconcileServices(instance *rayiov1alpha1.RayCluster, serviceType common.ServiceType) error { +func (r *RayClusterReconciler) reconcileServices(instance *rayv1alpha1.RayCluster, serviceType common.ServiceType) error { services := corev1.ServiceList{} var filterLabels client.MatchingLabels if serviceType == common.HeadService { @@ -381,10 +381,10 @@ func (r *RayClusterReconciler) reconcileServices(instance *rayiov1alpha1.RayClus return nil } -func (r *RayClusterReconciler) reconcilePods(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) reconcilePods(instance *rayv1alpha1.RayCluster) error { // check if all the pods exist headPods := corev1.PodList{} - filterLabels := client.MatchingLabels{common.RayClusterLabelKey: instance.Name, common.RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode)} + filterLabels := client.MatchingLabels{common.RayClusterLabelKey: instance.Name, common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode)} if err := r.List(context.TODO(), &headPods, client.InNamespace(instance.Namespace), filterLabels); err != nil { return err } @@ -649,7 +649,7 @@ func (r *RayClusterReconciler) reconcilePods(instance *rayiov1alpha1.RayCluster) return nil } -func (r *RayClusterReconciler) updateLocalWorkersToDelete(worker *rayiov1alpha1.WorkerGroupSpec, runningItems []corev1.Pod) { +func (r *RayClusterReconciler) updateLocalWorkersToDelete(worker *rayv1alpha1.WorkerGroupSpec, runningItems []corev1.Pod) { var actualWorkersToDelete []string itemMap := make(map[string]int) @@ -668,7 +668,7 @@ func (r *RayClusterReconciler) updateLocalWorkersToDelete(worker *rayiov1alpha1. 
worker.ScaleStrategy.WorkersToDelete = actualWorkersToDelete } -func (r *RayClusterReconciler) createHeadIngress(ingress *networkingv1.Ingress, instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) createHeadIngress(ingress *networkingv1.Ingress, instance *rayv1alpha1.RayCluster) error { // making sure the name is valid ingress.Name = utils.CheckName(ingress.Name) if err := controllerutil.SetControllerReference(instance, ingress, r.Scheme); err != nil { @@ -688,7 +688,7 @@ func (r *RayClusterReconciler) createHeadIngress(ingress *networkingv1.Ingress, return nil } -func (r *RayClusterReconciler) createService(raySvc *corev1.Service, instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) createService(raySvc *corev1.Service, instance *rayv1alpha1.RayCluster) error { // making sure the name is valid raySvc.Name = utils.CheckName(raySvc.Name) // Set controller reference @@ -709,7 +709,7 @@ func (r *RayClusterReconciler) createService(raySvc *corev1.Service, instance *r return nil } -func (r *RayClusterReconciler) createHeadPod(instance rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) createHeadPod(instance rayv1alpha1.RayCluster) error { // build the pod then create it pod := r.buildHeadPod(instance) podIdentifier := types.NamespacedName{ @@ -744,7 +744,7 @@ func (r *RayClusterReconciler) createHeadPod(instance rayiov1alpha1.RayCluster) return nil } -func (r *RayClusterReconciler) createWorkerPod(instance rayiov1alpha1.RayCluster, worker rayiov1alpha1.WorkerGroupSpec) error { +func (r *RayClusterReconciler) createWorkerPod(instance rayv1alpha1.RayCluster, worker rayv1alpha1.WorkerGroupSpec) error { // build the pod then create it pod := r.buildWorkerPod(instance, worker) podIdentifier := types.NamespacedName{ @@ -782,8 +782,8 @@ func (r *RayClusterReconciler) createWorkerPod(instance rayiov1alpha1.RayCluster } // Build head instance pod(s). -func (r *RayClusterReconciler) buildHeadPod(instance rayiov1alpha1.RayCluster) corev1.Pod { - podName := strings.ToLower(instance.Name + common.DashSymbol + string(rayiov1alpha1.HeadNode) + common.DashSymbol) +func (r *RayClusterReconciler) buildHeadPod(instance rayv1alpha1.RayCluster) corev1.Pod { + podName := strings.ToLower(instance.Name + common.DashSymbol + string(rayv1alpha1.HeadNode) + common.DashSymbol) podName = utils.CheckName(podName) // making sure the name is valid fqdnRayIP := utils.GenerateFQDNServiceName(instance.Name, instance.Namespace) // Fully Qualified Domain Name // The Ray head port used by workers to connect to the cluster (GCS server port for Ray >= 1.11.0, Redis port for older Ray.) 
@@ -792,7 +792,7 @@ func (r *RayClusterReconciler) buildHeadPod(instance rayiov1alpha1.RayCluster) c podConf := common.DefaultHeadPodTemplate(instance, instance.Spec.HeadGroupSpec, podName, headPort) r.Log.Info("head pod labels", "labels", podConf.Labels) creatorName := getCreator(instance) - pod := common.BuildPod(podConf, rayiov1alpha1.HeadNode, instance.Spec.HeadGroupSpec.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP) + pod := common.BuildPod(podConf, rayv1alpha1.HeadNode, instance.Spec.HeadGroupSpec.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP) // Set raycluster instance as the owner and controller if err := controllerutil.SetControllerReference(&instance, &pod, r.Scheme); err != nil { r.Log.Error(err, "Failed to set controller reference for raycluster pod") @@ -801,7 +801,7 @@ func (r *RayClusterReconciler) buildHeadPod(instance rayiov1alpha1.RayCluster) c return pod } -func getCreator(instance rayiov1alpha1.RayCluster) string { +func getCreator(instance rayv1alpha1.RayCluster) string { if instance.Labels == nil { return "" } @@ -815,8 +815,8 @@ func getCreator(instance rayiov1alpha1.RayCluster) string { } // Build worker instance pods. -func (r *RayClusterReconciler) buildWorkerPod(instance rayiov1alpha1.RayCluster, worker rayiov1alpha1.WorkerGroupSpec) corev1.Pod { - podName := strings.ToLower(instance.Name + common.DashSymbol + string(rayiov1alpha1.WorkerNode) + common.DashSymbol + worker.GroupName + common.DashSymbol) +func (r *RayClusterReconciler) buildWorkerPod(instance rayv1alpha1.RayCluster, worker rayv1alpha1.WorkerGroupSpec) corev1.Pod { + podName := strings.ToLower(instance.Name + common.DashSymbol + string(rayv1alpha1.WorkerNode) + common.DashSymbol + worker.GroupName + common.DashSymbol) podName = utils.CheckName(podName) // making sure the name is valid fqdnRayIP := utils.GenerateFQDNServiceName(instance.Name, instance.Namespace) // Fully Qualified Domain Name // The Ray head port used by workers to connect to the cluster (GCS server port for Ray >= 1.11.0, Redis port for older Ray.) @@ -824,7 +824,7 @@ func (r *RayClusterReconciler) buildWorkerPod(instance rayiov1alpha1.RayCluster, autoscalingEnabled := instance.Spec.EnableInTreeAutoscaling podTemplateSpec := common.DefaultWorkerPodTemplate(instance, worker, podName, fqdnRayIP, headPort) creatorName := getCreator(instance) - pod := common.BuildPod(podTemplateSpec, rayiov1alpha1.WorkerNode, worker.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP) + pod := common.BuildPod(podTemplateSpec, rayv1alpha1.WorkerNode, worker.RayStartParams, headPort, autoscalingEnabled, creatorName, fqdnRayIP) // Set raycluster instance as the owner and controller if err := controllerutil.SetControllerReference(&instance, &pod, r.Scheme); err != nil { r.Log.Error(err, "Failed to set controller reference for raycluster pod") @@ -837,7 +837,7 @@ func (r *RayClusterReconciler) buildWorkerPod(instance rayiov1alpha1.RayCluster, func (r *RayClusterReconciler) SetupWithManager(mgr ctrl.Manager, reconcileConcurrency int) error { b := ctrl.NewControllerManagedBy(mgr). Named("raycluster-controller"). 
- For(&rayiov1alpha1.RayCluster{}, builder.WithPredicates(predicate.Or( + For(&rayv1alpha1.RayCluster{}, builder.WithPredicates(predicate.Or( predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}, predicate.AnnotationChangedPredicate{}, @@ -879,7 +879,7 @@ func (r *RayClusterReconciler) SetupWithManager(mgr ctrl.Manager, reconcileConcu Complete(r) } -func (r *RayClusterReconciler) updateStatus(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) updateStatus(instance *rayv1alpha1.RayCluster) error { // TODO (kevin85421): ObservedGeneration should be used to determine whether to update this CR or not. instance.Status.ObservedGeneration = instance.ObjectMeta.Generation @@ -897,14 +897,14 @@ func (r *RayClusterReconciler) updateStatus(instance *rayiov1alpha1.RayCluster) // validation for the RayStartParam for the state. isValid, err := common.ValidateHeadRayStartParams(instance.Spec.HeadGroupSpec) if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, string(rayiov1alpha1.RayConfigError), err.Error()) + r.Recorder.Event(instance, corev1.EventTypeWarning, string(rayv1alpha1.RayConfigError), err.Error()) } // only in invalid status that we update the status to unhealthy. if !isValid { - instance.Status.State = rayiov1alpha1.Unhealthy + instance.Status.State = rayv1alpha1.Unhealthy } else { if utils.CheckAllPodsRunning(runtimePods) { - instance.Status.State = rayiov1alpha1.Ready + instance.Status.State = rayv1alpha1.Ready } } @@ -926,9 +926,9 @@ func (r *RayClusterReconciler) updateStatus(instance *rayiov1alpha1.RayCluster) } // Best effort to obtain the ip of the head node. -func (r *RayClusterReconciler) getHeadPodIP(instance *rayiov1alpha1.RayCluster) (string, error) { +func (r *RayClusterReconciler) getHeadPodIP(instance *rayv1alpha1.RayCluster) (string, error) { runtimePods := corev1.PodList{} - filterLabels := client.MatchingLabels{common.RayClusterLabelKey: instance.Name, common.RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode)} + filterLabels := client.MatchingLabels{common.RayClusterLabelKey: instance.Name, common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode)} if err := r.List(context.TODO(), &runtimePods, client.InNamespace(instance.Namespace), filterLabels); err != nil { r.Log.Error(err, "Failed to list pods while getting head pod ip.") return "", err @@ -940,7 +940,7 @@ func (r *RayClusterReconciler) getHeadPodIP(instance *rayiov1alpha1.RayCluster) return runtimePods.Items[0].Status.PodIP, nil } -func (r *RayClusterReconciler) getHeadServiceIP(instance *rayiov1alpha1.RayCluster) (string, error) { +func (r *RayClusterReconciler) getHeadServiceIP(instance *rayv1alpha1.RayCluster) (string, error) { runtimeServices := corev1.ServiceList{} filterLabels := client.MatchingLabels(common.HeadServiceLabels(*instance)) if err := r.List(context.TODO(), &runtimeServices, client.InNamespace(instance.Namespace), filterLabels); err != nil { @@ -957,7 +957,7 @@ func (r *RayClusterReconciler) getHeadServiceIP(instance *rayiov1alpha1.RayClust return runtimeServices.Items[0].Spec.ClusterIP, nil } -func (r *RayClusterReconciler) updateEndpoints(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) updateEndpoints(instance *rayv1alpha1.RayCluster) error { // TODO: (@scarlet25151) There may be several K8s Services for a RayCluster. // We assume we can find the right one by filtering Services with appropriate label selectors // and picking the first one. 
We may need to select by name in the future if the Service naming is stable. @@ -997,7 +997,7 @@ func (r *RayClusterReconciler) updateEndpoints(instance *rayiov1alpha1.RayCluste return nil } -func (r *RayClusterReconciler) updateHeadInfo(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) updateHeadInfo(instance *rayv1alpha1.RayCluster) error { if ip, err := r.getHeadPodIP(instance); err != nil { return err } else { @@ -1013,7 +1013,7 @@ func (r *RayClusterReconciler) updateHeadInfo(instance *rayiov1alpha1.RayCluster return nil } -func (r *RayClusterReconciler) reconcileAutoscalerServiceAccount(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) reconcileAutoscalerServiceAccount(instance *rayv1alpha1.RayCluster) error { if instance.Spec.EnableInTreeAutoscaling == nil || !*instance.Spec.EnableInTreeAutoscaling { return nil } @@ -1055,7 +1055,7 @@ func (r *RayClusterReconciler) reconcileAutoscalerServiceAccount(instance *rayio return nil } -func (r *RayClusterReconciler) reconcileAutoscalerRole(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) reconcileAutoscalerRole(instance *rayv1alpha1.RayCluster) error { if instance.Spec.EnableInTreeAutoscaling == nil || !*instance.Spec.EnableInTreeAutoscaling { return nil } @@ -1096,7 +1096,7 @@ func (r *RayClusterReconciler) reconcileAutoscalerRole(instance *rayiov1alpha1.R return nil } -func (r *RayClusterReconciler) reconcileAutoscalerRoleBinding(instance *rayiov1alpha1.RayCluster) error { +func (r *RayClusterReconciler) reconcileAutoscalerRoleBinding(instance *rayv1alpha1.RayCluster) error { if instance.Spec.EnableInTreeAutoscaling == nil || !*instance.Spec.EnableInTreeAutoscaling { return nil } @@ -1137,12 +1137,12 @@ func (r *RayClusterReconciler) reconcileAutoscalerRoleBinding(instance *rayiov1a return nil } -func (r *RayClusterReconciler) updateClusterState(instance *rayiov1alpha1.RayCluster, clusterState rayiov1alpha1.ClusterState) error { +func (r *RayClusterReconciler) updateClusterState(instance *rayv1alpha1.RayCluster, clusterState rayv1alpha1.ClusterState) error { instance.Status.State = clusterState return r.Status().Update(context.Background(), instance) } -func (r *RayClusterReconciler) updateClusterReason(instance *rayiov1alpha1.RayCluster, clusterReason string) error { +func (r *RayClusterReconciler) updateClusterReason(instance *rayv1alpha1.RayCluster, clusterReason string) error { instance.Status.Reason = clusterReason return r.Status().Update(context.Background(), instance) } diff --git a/ray-operator/controllers/ray/raycluster_controller_fake_test.go b/ray-operator/controllers/ray/raycluster_controller_fake_test.go index f108e28ca22..04e90a50887 100644 --- a/ray-operator/controllers/ray/raycluster_controller_fake_test.go +++ b/ray-operator/controllers/ray/raycluster_controller_fake_test.go @@ -23,7 +23,7 @@ import ( "github.com/ray-project/kuberay/ray-operator/controllers/ray/common" . 
"github.com/onsi/ginkgo" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned/scheme" "github.com/stretchr/testify/assert" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -57,7 +57,7 @@ var ( expectReplicaNum int32 testPods []runtime.Object testPodsNoHeadIP []runtime.Object - testRayCluster *rayiov1alpha1.RayCluster + testRayCluster *rayv1alpha1.RayCluster headSelector labels.Selector headNodeIP string testServices []runtime.Object @@ -87,7 +87,7 @@ func setupTest(t *testing.T) { Labels: map[string]string{ common.RayNodeLabelKey: "yes", common.RayClusterLabelKey: instanceName, - common.RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode), + common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), common.RayNodeGroupLabelKey: headGroupNameStr, }, }, @@ -235,7 +235,7 @@ func setupTest(t *testing.T) { Labels: map[string]string{ common.RayNodeLabelKey: "yes", common.RayClusterLabelKey: instanceName, - common.RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode), + common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), common.RayNodeGroupLabelKey: headGroupNameStr, }, }, @@ -245,15 +245,15 @@ func setupTest(t *testing.T) { }, }, } - testRayCluster = &rayiov1alpha1.RayCluster{ + testRayCluster = &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: instanceName, Namespace: namespaceStr, }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.0", EnableInTreeAutoscaling: &enableInTreeAutoscaling, - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32Ptr(1), RayStartParams: map[string]string{ "port": "6379", @@ -286,7 +286,7 @@ func setupTest(t *testing.T) { }, }, }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32Ptr(expectReplicaNum), MinReplicas: pointer.Int32Ptr(0), @@ -318,7 +318,7 @@ func setupTest(t *testing.T) { }, }, }, - ScaleStrategy: rayiov1alpha1.ScaleStrategy{ + ScaleStrategy: rayv1alpha1.ScaleStrategy{ WorkersToDelete: workersToDelete, }, }, @@ -935,7 +935,7 @@ func TestReconcile_UpdateClusterReason(t *testing.T) { setupTest(t) defer tearDown(t) newScheme := runtime.NewScheme() - _ = rayiov1alpha1.AddToScheme(newScheme) + _ = rayv1alpha1.AddToScheme(newScheme) fakeClient := clientFake.NewClientBuilder().WithScheme(newScheme).WithRuntimeObjects(testRayCluster).Build() @@ -943,7 +943,7 @@ func TestReconcile_UpdateClusterReason(t *testing.T) { Name: instanceName, Namespace: namespaceStr, } - cluster := rayiov1alpha1.RayCluster{} + cluster := rayv1alpha1.RayCluster{} err := fakeClient.Get(context.Background(), namespacedName, &cluster) assert.Nil(t, err, "Fail to get RayCluster") assert.Empty(t, cluster.Status.Reason, "Cluster reason should be empty") @@ -1001,7 +1001,7 @@ func TestGetHeadPodIP(t *testing.T) { Namespace: namespaceStr, Labels: map[string]string{ common.RayClusterLabelKey: instanceName, - common.RayNodeTypeLabelKey: string(rayiov1alpha1.HeadNode), + common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), common.RayNodeGroupLabelKey: headGroupNameStr, }, }, @@ -1132,7 +1132,7 @@ func TestUpdateStatusObservedGeneration(t *testing.T) { // Create a new scheme with CRDs, Pod, Service schemes. 
newScheme := runtime.NewScheme() - _ = rayiov1alpha1.AddToScheme(newScheme) + _ = rayv1alpha1.AddToScheme(newScheme) _ = corev1.AddToScheme(newScheme) // To update the status of RayCluster with `r.Status().Update()`, @@ -1159,7 +1159,7 @@ func TestUpdateStatusObservedGeneration(t *testing.T) { Name: instanceName, Namespace: namespaceStr, } - cluster := rayiov1alpha1.RayCluster{} + cluster := rayv1alpha1.RayCluster{} err = fakeClient.Get(context.Background(), namespacedName, &cluster) assert.Nil(t, err, "Fail to get RayCluster") assert.Equal(t, int64(-1), cluster.Status.ObservedGeneration) diff --git a/ray-operator/controllers/ray/raycluster_controller_test.go b/ray-operator/controllers/ray/raycluster_controller_test.go index dd17bfc4e99..5500b19bb03 100644 --- a/ray-operator/controllers/ray/raycluster_controller_test.go +++ b/ray-operator/controllers/ray/raycluster_controller_test.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -45,15 +45,15 @@ var _ = Context("Inside the default namespace", func() { var headPods corev1.PodList enableInTreeAutoscaling := true - myRayCluster := &rayiov1alpha1.RayCluster{ + myRayCluster := &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.0", EnableInTreeAutoscaling: &enableInTreeAutoscaling, - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ RayStartParams: map[string]string{ "port": "6379", "object-manager-port": "12345", @@ -84,7 +84,7 @@ var _ = Context("Inside the default namespace", func() { }, }, }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32(3), MinReplicas: pointer.Int32(0), @@ -141,7 +141,7 @@ var _ = Context("Inside the default namespace", func() { Eventually( getResourceFunc(ctx, client.ObjectKey{Name: "raycluster-sample-head-svc", Namespace: "default"}, svc), time.Second*15, time.Millisecond*500).Should(BeNil(), "My head service = %v", svc) - Expect(svc.Spec.Selector[common.RayIDLabelKey]).Should(Equal(utils.GenerateIdentifier(myRayCluster.Name, rayiov1alpha1.HeadNode))) + Expect(svc.Spec.Selector[common.RayIDLabelKey]).Should(Equal(utils.GenerateIdentifier(myRayCluster.Name, rayv1alpha1.HeadNode))) }) It("should create 3 workers", func() { @@ -213,7 +213,7 @@ var _ = Context("Inside the default namespace", func() { It("cluster's .status.state should be updated to 'ready' shortly after all Pods are Running", func() { Eventually( getClusterState(ctx, "default", myRayCluster.Name), - time.Second*(common.RAYCLUSTER_DEFAULT_REQUEUE_SECONDS+5), time.Millisecond*500).Should(Equal(rayiov1alpha1.Ready)) + time.Second*(common.RAYCLUSTER_DEFAULT_REQUEUE_SECONDS+5), time.Millisecond*500).Should(Equal(rayv1alpha1.Ready)) }) It("should re-create a deleted worker", func() { @@ -332,9 +332,9 @@ func listResourceFunc(ctx context.Context, workerPods *corev1.PodList, opt ...cl } } -func getClusterState(ctx context.Context, namespace string, clusterName string) func() rayiov1alpha1.ClusterState { - return func() rayiov1alpha1.ClusterState { - var cluster rayiov1alpha1.RayCluster +func getClusterState(ctx context.Context, namespace string, 
clusterName string) func() rayv1alpha1.ClusterState { + return func() rayv1alpha1.ClusterState { + var cluster rayv1alpha1.RayCluster if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &cluster); err != nil { log.Fatal(err) } diff --git a/ray-operator/controllers/ray/rayjob_controller_suspended_test.go b/ray-operator/controllers/ray/rayjob_controller_suspended_test.go index 689fc4b33a7..14f6456f105 100644 --- a/ray-operator/controllers/ray/rayjob_controller_suspended_test.go +++ b/ray-operator/controllers/ray/rayjob_controller_suspended_test.go @@ -28,7 +28,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" @@ -41,19 +41,19 @@ var _ = Context("Inside the default namespace", func() { ctx := context.TODO() var workerPods corev1.PodList var headPods corev1.PodList - mySuspendedRayCluster := &rayiov1alpha1.RayCluster{} + mySuspendedRayCluster := &rayv1alpha1.RayCluster{} - mySuspendedRayJob := &rayiov1alpha1.RayJob{ + mySuspendedRayJob := &rayv1alpha1.RayJob{ ObjectMeta: metav1.ObjectMeta{ Name: "rayjob-test-suspend", Namespace: "default", }, - Spec: rayiov1alpha1.RayJobSpec{ + Spec: rayv1alpha1.RayJobSpec{ Suspend: true, Entrypoint: "sleep 999", - RayClusterSpec: &rayiov1alpha1.RayClusterSpec{ + RayClusterSpec: &rayv1alpha1.RayClusterSpec{ RayVersion: "2.4.0", - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ ServiceType: corev1.ServiceTypeClusterIP, Replicas: pointer.Int32(1), RayStartParams: map[string]string{ @@ -123,7 +123,7 @@ var _ = Context("Inside the default namespace", func() { }, }, }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32(3), MinReplicas: pointer.Int32(0), @@ -194,7 +194,7 @@ var _ = Context("Inside the default namespace", func() { It("should have deployment status suspended", func() { Eventually( getRayJobDeploymentStatus(ctx, mySuspendedRayJob), - time.Second*5, time.Millisecond*500).Should(Equal(rayiov1alpha1.JobDeploymentStatusSuspended)) + time.Second*5, time.Millisecond*500).Should(Equal(rayv1alpha1.JobDeploymentStatusSuspended)) }) It("should NOT create a raycluster object", func() { @@ -302,8 +302,8 @@ var _ = Context("Inside the default namespace", func() { }) }) -func getRayJobDeploymentStatus(ctx context.Context, rayJob *rayiov1alpha1.RayJob) func() (rayiov1alpha1.JobDeploymentStatus, error) { - return func() (rayiov1alpha1.JobDeploymentStatus, error) { +func getRayJobDeploymentStatus(ctx context.Context, rayJob *rayv1alpha1.RayJob) func() (rayv1alpha1.JobDeploymentStatus, error) { + return func() (rayv1alpha1.JobDeploymentStatus, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayJob.Name, Namespace: "default"}, rayJob); err != nil { return "", err } diff --git a/ray-operator/controllers/ray/rayjob_controller_test.go b/ray-operator/controllers/ray/rayjob_controller_test.go index 827da49f841..410cc91bb3a 100644 --- a/ray-operator/controllers/ray/rayjob_controller_test.go +++ b/ray-operator/controllers/ray/rayjob_controller_test.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" @@ -38,16 +38,16 @@ import ( // +kubebuilder:scaffold:imports ) -var myRayJob = &rayiov1alpha1.RayJob{ +var myRayJob = &rayv1alpha1.RayJob{ ObjectMeta: metav1.ObjectMeta{ Name: "rayjob-test", Namespace: "default", }, - Spec: rayiov1alpha1.RayJobSpec{ + Spec: rayv1alpha1.RayJobSpec{ Entrypoint: "sleep 999", - RayClusterSpec: &rayiov1alpha1.RayClusterSpec{ + RayClusterSpec: &rayv1alpha1.RayClusterSpec{ RayVersion: "1.12.1", - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32(1), RayStartParams: map[string]string{ "port": "6379", @@ -114,7 +114,7 @@ var myRayJob = &rayiov1alpha1.RayJob{ }, }, }, - WorkerGroupSpecs: []rayiov1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32(3), MinReplicas: pointer.Int32(0), @@ -190,7 +190,7 @@ var _ = Context("Inside the default namespace", func() { Eventually( getRayClusterNameForRayJob(ctx, myRayJob), time.Second*15, time.Millisecond*500).Should(Not(BeEmpty()), "My RayCluster name = %v", myRayJob.Status.RayClusterName) - myRayCluster := &rayiov1alpha1.RayCluster{} + myRayCluster := &rayv1alpha1.RayCluster{} Eventually( getResourceFunc(ctx, client.ObjectKey{Name: myRayJob.Status.RayClusterName, Namespace: "default"}, myRayCluster), time.Second*3, time.Millisecond*500).Should(BeNil(), "My myRayCluster = %v", myRayCluster.Name) @@ -211,7 +211,7 @@ var _ = Context("Inside the default namespace", func() { getRayClusterNameForRayJob(ctx, myRayJob), time.Second*15, time.Millisecond*500).Should(Not(BeEmpty()), "My RayCluster name = %v", myRayJob.Status.RayClusterName) - myRayCluster := &rayiov1alpha1.RayCluster{} + myRayCluster := &rayv1alpha1.RayCluster{} Eventually( getResourceFunc(ctx, client.ObjectKey{Name: myRayJob.Status.RayClusterName, Namespace: "default"}, myRayCluster), time.Second*3, time.Millisecond*500).Should(BeNil(), "My myRayCluster = %v", myRayCluster.Name) @@ -243,12 +243,12 @@ var _ = Context("Inside the default namespace", func() { Eventually( getRayClusterNameForRayJob(ctx, myRayJob), time.Second*15, time.Millisecond*500).Should(Not(BeEmpty()), "My RayCluster name = %v", myRayJob.Status.RayClusterName) - myRayJobWithClusterSelector := &rayiov1alpha1.RayJob{ + myRayJobWithClusterSelector := &rayv1alpha1.RayJob{ ObjectMeta: metav1.ObjectMeta{ Name: "rayjob-test-default-2", Namespace: "default", }, - Spec: rayiov1alpha1.RayJobSpec{ + Spec: rayv1alpha1.RayJobSpec{ Entrypoint: "sleep 999", ClusterSelector: map[string]string{}, }, @@ -269,10 +269,10 @@ var _ = Context("Inside the default namespace with autoscaler", func() { ctx := context.TODO() myRayJob := myRayJob.DeepCopy() myRayJob.Name = "rayjob-test-with-autoscaler" - upscalingMode := rayiov1alpha1.UpscalingMode("Default") + upscalingMode := rayv1alpha1.UpscalingMode("Default") imagePullPolicy := corev1.PullPolicy("IfNotPresent") myRayJob.Spec.RayClusterSpec.EnableInTreeAutoscaling = pointer.BoolPtr(true) - myRayJob.Spec.RayClusterSpec.AutoscalerOptions = &rayiov1alpha1.AutoscalerOptions{ + myRayJob.Spec.RayClusterSpec.AutoscalerOptions = &rayv1alpha1.AutoscalerOptions{ UpscalingMode: &upscalingMode, IdleTimeoutSeconds: pointer.Int32(1), ImagePullPolicy: &imagePullPolicy, @@ -297,7 +297,7 @@ var _ = Context("Inside the default namespace with autoscaler", 
func() { getRayClusterNameForRayJob(ctx, myRayJob), time.Second*15, time.Millisecond*500).Should(Not(BeEmpty()), "My RayCluster name = %v", myRayJob.Status.RayClusterName) - myRayCluster := &rayiov1alpha1.RayCluster{} + myRayCluster := &rayv1alpha1.RayCluster{} Eventually( getResourceFunc(ctx, client.ObjectKey{Name: myRayJob.Status.RayClusterName, Namespace: "default"}, myRayCluster), time.Second*3, time.Millisecond*500).Should(BeNil(), "My myRayCluster = %v", myRayCluster.Name) @@ -336,7 +336,7 @@ var _ = Context("Inside the default namespace with autoscaler", func() { getRayClusterNameForRayJob(ctx, myRayJob), time.Second*15, time.Millisecond*500).Should(Not(BeEmpty()), "My RayCluster name = %v", myRayJob.Status.RayClusterName) - myRayCluster := &rayiov1alpha1.RayCluster{} + myRayCluster := &rayv1alpha1.RayCluster{} Eventually( getResourceFunc(ctx, client.ObjectKey{Name: myRayJob.Status.RayClusterName, Namespace: "default"}, myRayCluster), time.Second*3, time.Millisecond*500).Should(BeNil(), "My myRayCluster = %v", myRayCluster.Name) @@ -360,7 +360,7 @@ var _ = Context("Inside the default namespace with autoscaler", func() { }) }) -func getRayClusterNameForRayJob(ctx context.Context, rayJob *rayiov1alpha1.RayJob) func() (string, error) { +func getRayClusterNameForRayJob(ctx context.Context, rayJob *rayv1alpha1.RayJob) func() (string, error) { return func() (string, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayJob.Name, Namespace: "default"}, rayJob); err != nil { return "", err @@ -369,7 +369,7 @@ func getRayClusterNameForRayJob(ctx context.Context, rayJob *rayiov1alpha1.RayJo } } -func getDashboardURLForRayJob(ctx context.Context, rayJob *rayiov1alpha1.RayJob) func() (string, error) { +func getDashboardURLForRayJob(ctx context.Context, rayJob *rayv1alpha1.RayJob) func() (string, error) { return func() (string, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayJob.Name, Namespace: "default"}, rayJob); err != nil { return "", err diff --git a/ray-operator/controllers/ray/rayservice_controller_test.go b/ray-operator/controllers/ray/rayservice_controller_test.go index 07b1bb4e850..07886198d21 100644 --- a/ray-operator/controllers/ray/rayservice_controller_test.go +++ b/ray-operator/controllers/ray/rayservice_controller_test.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/utils/pointer" @@ -47,21 +47,21 @@ var _ = Context("Inside the default namespace", func() { var numReplicas int32 = 1 var numCpus float64 = 0.1 - myRayService := &v1alpha1.RayService{ + myRayService := &rayv1alpha1.RayService{ ObjectMeta: metav1.ObjectMeta{ Name: "rayservice-sample", Namespace: "default", }, - Spec: v1alpha1.RayServiceSpec{ - ServeDeploymentGraphSpec: v1alpha1.ServeDeploymentGraphSpec{ + Spec: rayv1alpha1.RayServiceSpec{ + ServeDeploymentGraphSpec: rayv1alpha1.ServeDeploymentGraphSpec{ ImportPath: "fruit.deployment_graph", RuntimeEnv: runtimeEnvStr, - ServeConfigSpecs: []v1alpha1.ServeConfigSpec{ + ServeConfigSpecs: []rayv1alpha1.ServeConfigSpec{ { Name: "MangoStand", NumReplicas: &numReplicas, UserConfig: "price: 3", - RayActorOptions: v1alpha1.RayActorOptionSpec{ + RayActorOptions: rayv1alpha1.RayActorOptionSpec{ NumCpus: &numCpus, }, }, @@ -69,7 +69,7 @@ var _ = Context("Inside the default namespace", func() { Name: "OrangeStand", NumReplicas: &numReplicas, UserConfig: "price: 2", - RayActorOptions: v1alpha1.RayActorOptionSpec{ + RayActorOptions: rayv1alpha1.RayActorOptionSpec{ NumCpus: &numCpus, }, }, @@ -77,15 +77,15 @@ var _ = Context("Inside the default namespace", func() { Name: "PearStand", NumReplicas: &numReplicas, UserConfig: "price: 1", - RayActorOptions: v1alpha1.RayActorOptionSpec{ + RayActorOptions: rayv1alpha1.RayActorOptionSpec{ NumCpus: &numCpus, }, }, }, }, - RayClusterSpec: v1alpha1.RayClusterSpec{ + RayClusterSpec: rayv1alpha1.RayClusterSpec{ RayVersion: "1.12.1", - HeadGroupSpec: v1alpha1.HeadGroupSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Replicas: pointer.Int32(1), RayStartParams: map[string]string{ "port": "6379", @@ -160,7 +160,7 @@ var _ = Context("Inside the default namespace", func() { }, }, }, - WorkerGroupSpecs: []v1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Replicas: pointer.Int32(3), MinReplicas: pointer.Int32(0), @@ -227,7 +227,7 @@ var _ = Context("Inside the default namespace", func() { utils.GetRayHttpProxyClientFunc = utils.GetFakeRayHttpProxyClient - myRayCluster := &v1alpha1.RayCluster{} + myRayCluster := &rayv1alpha1.RayCluster{} Describe("When creating a rayservice", func() { It("should create a rayservice object", func() { @@ -282,7 +282,7 @@ var _ = Context("Inside the default namespace", func() { Eventually( getResourceFunc(ctx, client.ObjectKey{Name: utils.GenerateServiceName(myRayService.Name), Namespace: "default"}, svc), time.Second*15, time.Millisecond*500).Should(BeNil(), "My head service = %v", svc) - Expect(svc.Spec.Selector[common.RayIDLabelKey]).Should(Equal(utils.GenerateIdentifier(myRayCluster.Name, v1alpha1.HeadNode))) + Expect(svc.Spec.Selector[common.RayIDLabelKey]).Should(Equal(utils.GenerateIdentifier(myRayCluster.Name, rayv1alpha1.HeadNode))) }) It("should create a new agent service resource", func() { @@ -425,7 +425,7 @@ var _ = Context("Inside the default namespace", func() { time.Second*3, time.Millisecond*500).Should(Equal(initialClusterName), "Active RayCluster name = %v", myRayService.Status.ActiveServiceStatus.RayClusterName) // Check if all the ServeStatuses[i].Status are UNHEALTHY. 
- checkAllServeStatusesUnhealthy := func(ctx context.Context, rayService *v1alpha1.RayService) bool { + checkAllServeStatusesUnhealthy := func(ctx context.Context, rayService *rayv1alpha1.RayService) bool { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayService.Name, Namespace: rayService.Namespace}, rayService); err != nil { return false } @@ -582,12 +582,12 @@ func prepareFakeRayDashboardClient() utils.FakeRayDashboardClient { func generateServeStatus(time metav1.Time, status string) utils.ServeDeploymentStatuses { serveStatuses := utils.ServeDeploymentStatuses{ - ApplicationStatus: v1alpha1.AppStatus{ + ApplicationStatus: rayv1alpha1.AppStatus{ Status: "RUNNING", LastUpdateTime: &time, HealthLastUpdateTime: &time, }, - DeploymentStatuses: []v1alpha1.ServeDeploymentStatus{ + DeploymentStatuses: []rayv1alpha1.ServeDeploymentStatus{ { Name: "shallow", Status: status, @@ -615,7 +615,7 @@ func generateServeStatus(time metav1.Time, status string) utils.ServeDeploymentS return serveStatuses } -func getRayClusterNameFunc(ctx context.Context, rayService *v1alpha1.RayService) func() (string, error) { +func getRayClusterNameFunc(ctx context.Context, rayService *rayv1alpha1.RayService) func() (string, error) { return func() (string, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayService.Name, Namespace: "default"}, rayService); err != nil { return "", err @@ -624,7 +624,7 @@ func getRayClusterNameFunc(ctx context.Context, rayService *v1alpha1.RayService) } } -func getPreparingRayClusterNameFunc(ctx context.Context, rayService *v1alpha1.RayService) func() (string, error) { +func getPreparingRayClusterNameFunc(ctx context.Context, rayService *rayv1alpha1.RayService) func() (string, error) { return func() (string, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayService.Name, Namespace: "default"}, rayService); err != nil { return "", err @@ -633,7 +633,7 @@ func getPreparingRayClusterNameFunc(ctx context.Context, rayService *v1alpha1.Ra } } -func checkServiceHealth(ctx context.Context, rayService *v1alpha1.RayService) func() (bool, error) { +func checkServiceHealth(ctx context.Context, rayService *rayv1alpha1.RayService) func() (bool, error) { return func() (bool, error) { if err := k8sClient.Get(ctx, client.ObjectKey{Name: rayService.Name, Namespace: rayService.Namespace}, rayService); err != nil { return false, err @@ -661,7 +661,7 @@ func updateHeadPodToRunningAndReady(ctx context.Context, rayClusterName string) headPods := corev1.PodList{} headFilterLabels := client.MatchingLabels{ common.RayClusterLabelKey: rayClusterName, - common.RayNodeTypeLabelKey: string(v1alpha1.HeadNode), + common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), } Eventually( diff --git a/ray-operator/controllers/ray/rayservice_controller_unit_test.go b/ray-operator/controllers/ray/rayservice_controller_unit_test.go index 39a31c6d660..f9b8c7e970c 100644 --- a/ray-operator/controllers/ray/rayservice_controller_unit_test.go +++ b/ray-operator/controllers/ray/rayservice_controller_unit_test.go @@ -5,7 +5,7 @@ import ( "reflect" "testing" - "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" "github.com/ray-project/kuberay/ray-operator/controllers/ray/common" "github.com/ray-project/kuberay/ray-operator/pkg/client/clientset/versioned/scheme" "github.com/stretchr/testify/assert" @@ -23,10 +23,10 @@ func TestGenerateRayClusterJsonHash(t *testing.T) { // `generateRayClusterJsonHash` will mute fields 
that will not trigger new RayCluster preparation. For example, // Autoscaler will update `Replicas` and `WorkersToDelete` when scaling up/down. Hence, `hash1` should be equal to // `hash2` in this case. - cluster := v1alpha1.RayCluster{ - Spec: v1alpha1.RayClusterSpec{ + cluster := rayv1alpha1.RayCluster{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "2.4.0", - WorkerGroupSpecs: []v1alpha1.WorkerGroupSpec{ + WorkerGroupSpecs: []rayv1alpha1.WorkerGroupSpec{ { Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{}, @@ -55,8 +55,8 @@ func TestGenerateRayClusterJsonHash(t *testing.T) { } func TestCompareRayClusterJsonHash(t *testing.T) { - cluster1 := v1alpha1.RayCluster{ - Spec: v1alpha1.RayClusterSpec{ + cluster1 := rayv1alpha1.RayCluster{ + Spec: rayv1alpha1.RayClusterSpec{ RayVersion: "2.4.0", }, } @@ -77,21 +77,21 @@ func TestInconsistentRayServiceStatuses(t *testing.T) { } timeNow := metav1.Now() - oldStatus := v1alpha1.RayServiceStatuses{ - ActiveServiceStatus: v1alpha1.RayServiceStatus{ + oldStatus := rayv1alpha1.RayServiceStatuses{ + ActiveServiceStatus: rayv1alpha1.RayServiceStatus{ RayClusterName: "new-cluster", - DashboardStatus: v1alpha1.DashboardStatus{ + DashboardStatus: rayv1alpha1.DashboardStatus{ IsHealthy: true, LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ApplicationStatus: v1alpha1.AppStatus{ + ApplicationStatus: rayv1alpha1.AppStatus{ Status: "running", Message: "OK", LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ServeStatuses: []v1alpha1.ServeDeploymentStatus{ + ServeStatuses: []rayv1alpha1.ServeDeploymentStatus{ { Name: "serve-1", Status: "unhealthy", @@ -101,20 +101,20 @@ func TestInconsistentRayServiceStatuses(t *testing.T) { }, }, }, - PendingServiceStatus: v1alpha1.RayServiceStatus{ + PendingServiceStatus: rayv1alpha1.RayServiceStatus{ RayClusterName: "old-cluster", - DashboardStatus: v1alpha1.DashboardStatus{ + DashboardStatus: rayv1alpha1.DashboardStatus{ IsHealthy: true, LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ApplicationStatus: v1alpha1.AppStatus{ + ApplicationStatus: rayv1alpha1.AppStatus{ Status: "stopped", Message: "stopped", LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ServeStatuses: []v1alpha1.ServeDeploymentStatus{ + ServeStatuses: []rayv1alpha1.ServeDeploymentStatus{ { Name: "serve-1", Status: "healthy", @@ -124,12 +124,12 @@ func TestInconsistentRayServiceStatuses(t *testing.T) { }, }, }, - ServiceStatus: v1alpha1.WaitForDashboard, + ServiceStatus: rayv1alpha1.WaitForDashboard, } // Test 1: Update ServiceStatus only. 
newStatus := oldStatus.DeepCopy() - newStatus.ServiceStatus = v1alpha1.WaitForServeDeploymentReady + newStatus.ServiceStatus = rayv1alpha1.WaitForServeDeploymentReady assert.True(t, r.inconsistentRayServiceStatuses(oldStatus, *newStatus)) // Test 2: Test RayServiceStatus @@ -143,20 +143,20 @@ func TestInconsistentRayServiceStatuses(t *testing.T) { func TestInconsistentRayServiceStatus(t *testing.T) { timeNow := metav1.Now() - oldStatus := v1alpha1.RayServiceStatus{ + oldStatus := rayv1alpha1.RayServiceStatus{ RayClusterName: "cluster-1", - DashboardStatus: v1alpha1.DashboardStatus{ + DashboardStatus: rayv1alpha1.DashboardStatus{ IsHealthy: true, LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ApplicationStatus: v1alpha1.AppStatus{ + ApplicationStatus: rayv1alpha1.AppStatus{ Status: "running", Message: "Application is running", LastUpdateTime: &timeNow, HealthLastUpdateTime: &timeNow, }, - ServeStatuses: []v1alpha1.ServeDeploymentStatus{ + ServeStatuses: []rayv1alpha1.ServeDeploymentStatus{ { Name: "serve-1", Status: "healthy", @@ -186,11 +186,11 @@ func TestInconsistentRayServiceStatus(t *testing.T) { func TestIsHeadPodRunningAndReady(t *testing.T) { // Create a new scheme with CRDs, Pod, Service schemes. newScheme := runtime.NewScheme() - _ = v1alpha1.AddToScheme(newScheme) + _ = rayv1alpha1.AddToScheme(newScheme) _ = corev1.AddToScheme(newScheme) // Mock data - cluster := v1alpha1.RayCluster{ + cluster := rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", Namespace: "default", @@ -203,7 +203,7 @@ func TestIsHeadPodRunningAndReady(t *testing.T) { Namespace: cluster.ObjectMeta.Namespace, Labels: map[string]string{ common.RayClusterLabelKey: cluster.ObjectMeta.Name, - common.RayNodeTypeLabelKey: string(v1alpha1.HeadNode), + common.RayNodeTypeLabelKey: string(rayv1alpha1.HeadNode), }, }, Status: corev1.PodStatus{ @@ -267,18 +267,18 @@ func TestIsHeadPodRunningAndReady(t *testing.T) { func TestReconcileServices_UpdateService(t *testing.T) { // Create a new scheme with CRDs, Pod, Service schemes. newScheme := runtime.NewScheme() - _ = v1alpha1.AddToScheme(newScheme) + _ = rayv1alpha1.AddToScheme(newScheme) _ = corev1.AddToScheme(newScheme) // Mock data namespace := "ray" - cluster := v1alpha1.RayCluster{ + cluster := rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", Namespace: namespace, }, - Spec: v1alpha1.RayClusterSpec{ - HeadGroupSpec: v1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -292,7 +292,7 @@ func TestReconcileServices_UpdateService(t *testing.T) { }, }, } - rayService := v1alpha1.RayService{ + rayService := rayv1alpha1.RayService{ ObjectMeta: metav1.ObjectMeta{ Name: "test-service", Namespace: cluster.ObjectMeta.Namespace, diff --git a/ray-operator/controllers/ray/suite_test.go b/ray-operator/controllers/ray/suite_test.go index 9cd205cf401..0ab74c9d050 100644 --- a/ray-operator/controllers/ray/suite_test.go +++ b/ray-operator/controllers/ray/suite_test.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -71,7 +71,7 @@ var _ = BeforeSuite(func(done Done) { Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) - err = rayiov1alpha1.AddToScheme(scheme.Scheme) + err = rayv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/ray-operator/controllers/ray/utils/util.go b/ray-operator/controllers/ray/utils/util.go index 548163ae1f0..30e4a9bef8a 100644 --- a/ray-operator/controllers/ray/utils/util.go +++ b/ray-operator/controllers/ray/utils/util.go @@ -18,7 +18,7 @@ import ( "github.com/sirupsen/logrus" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -131,7 +131,7 @@ func GetNamespace(metaData metav1.ObjectMeta) string { // GenerateServiceName generates a ray head service name from cluster name func GenerateServiceName(clusterName string) string { - return CheckName(fmt.Sprintf("%s-%s-%s", clusterName, rayiov1alpha1.HeadNode, "svc")) + return CheckName(fmt.Sprintf("%s-%s-%s", clusterName, rayv1alpha1.HeadNode, "svc")) } // GenerateFQDNServiceName generates a Fully Qualified Domain Name. @@ -167,7 +167,7 @@ func GenerateServeServiceLabel(serviceName string) string { // GenerateIngressName generates an ingress name from cluster name func GenerateIngressName(clusterName string) string { - return fmt.Sprintf("%s-%s-%s", clusterName, rayiov1alpha1.HeadNode, "ingress") + return fmt.Sprintf("%s-%s-%s", clusterName, rayv1alpha1.HeadNode, "ingress") } // GenerateRayClusterName generates a ray cluster name from ray service name @@ -181,7 +181,7 @@ func GenerateRayJobId(rayjob string) string { } // GenerateIdentifier generates identifier of same group pods -func GenerateIdentifier(clusterName string, nodeType rayiov1alpha1.RayNodeType) string { +func GenerateIdentifier(clusterName string, nodeType rayv1alpha1.RayNodeType) string { return fmt.Sprintf("%s-%s", clusterName, nodeType) } @@ -196,7 +196,7 @@ func FindRayContainerIndex(spec corev1.PodSpec) (index int) { } // CalculateDesiredReplicas calculate desired worker replicas at the cluster level -func CalculateDesiredReplicas(cluster *rayiov1alpha1.RayCluster) int32 { +func CalculateDesiredReplicas(cluster *rayv1alpha1.RayCluster) int32 { count := int32(0) for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { count += *nodeGroup.Replicas @@ -206,7 +206,7 @@ func CalculateDesiredReplicas(cluster *rayiov1alpha1.RayCluster) int32 { } // CalculateMinReplicas calculates min worker replicas at the cluster level -func CalculateMinReplicas(cluster *rayiov1alpha1.RayCluster) int32 { +func CalculateMinReplicas(cluster *rayv1alpha1.RayCluster) int32 { count := int32(0) for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { count += *nodeGroup.MinReplicas @@ -216,7 +216,7 @@ func CalculateMinReplicas(cluster *rayiov1alpha1.RayCluster) int32 { } // CalculateMaxReplicas calculates max worker replicas at the cluster level -func CalculateMaxReplicas(cluster *rayiov1alpha1.RayCluster) int32 { +func CalculateMaxReplicas(cluster *rayv1alpha1.RayCluster) int32 { count := int32(0) for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { count += *nodeGroup.MaxReplicas @@ -230,7 +230,7 @@ func CalculateMaxReplicas(cluster *rayiov1alpha1.RayCluster) int32 { func CalculateAvailableReplicas(pods corev1.PodList) int32 { count := int32(0) for _, pod := range pods.Items { - if val, ok := 
pod.Labels["ray.io/node-type"]; !ok || val != string(rayiov1alpha1.WorkerNode) { + if val, ok := pod.Labels["ray.io/node-type"]; !ok || val != string(rayv1alpha1.WorkerNode) { continue } if pod.Status.Phase == corev1.PodRunning { @@ -241,7 +241,7 @@ func CalculateAvailableReplicas(pods corev1.PodList) int32 { return count } -func CalculateDesiredResources(cluster *rayiov1alpha1.RayCluster) corev1.ResourceList { +func CalculateDesiredResources(cluster *rayv1alpha1.RayCluster) corev1.ResourceList { desiredResourcesList := []corev1.ResourceList{{}} headPodResource := calculatePodResource(cluster.Spec.HeadGroupSpec.Template.Spec) for i := int32(0); i < *cluster.Spec.HeadGroupSpec.Replicas; i++ { @@ -256,7 +256,7 @@ func CalculateDesiredResources(cluster *rayiov1alpha1.RayCluster) corev1.Resourc return sumResourceList(desiredResourcesList) } -func CalculateMinResources(cluster *rayiov1alpha1.RayCluster) corev1.ResourceList { +func CalculateMinResources(cluster *rayv1alpha1.RayCluster) corev1.ResourceList { minResourcesList := []corev1.ResourceList{{}} headPodResource := calculatePodResource(cluster.Spec.HeadGroupSpec.Template.Spec) for i := int32(0); i < *cluster.Spec.HeadGroupSpec.Replicas; i++ { @@ -328,7 +328,7 @@ func FilterContainerByName(containers []corev1.Container, name string) (corev1.C // GetHeadGroupServiceAccountName returns the head group service account if it exists. // Otherwise, it returns the name of the cluster itself. -func GetHeadGroupServiceAccountName(cluster *rayiov1alpha1.RayCluster) string { +func GetHeadGroupServiceAccountName(cluster *rayv1alpha1.RayCluster) string { headGroupServiceAccountName := cluster.Spec.HeadGroupSpec.Template.Spec.ServiceAccountName if headGroupServiceAccountName != "" { return headGroupServiceAccountName diff --git a/ray-operator/controllers/ray/utils/util_test.go b/ray-operator/controllers/ray/utils/util_test.go index 291f9292d05..f9a859ebdc8 100644 --- a/ray-operator/controllers/ray/utils/util_test.go +++ b/ray-operator/controllers/ray/utils/util_test.go @@ -7,7 +7,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rayiov1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" + rayv1alpha1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1alpha1" corev1 "k8s.io/api/core/v1" ) @@ -117,17 +117,17 @@ func createSomePod() (pod *corev1.Pod) { func TestGetHeadGroupServiceAccountName(t *testing.T) { tests := map[string]struct { - input *rayiov1alpha1.RayCluster + input *rayv1alpha1.RayCluster want string }{ "Ray cluster with head group service account": { - input: &rayiov1alpha1.RayCluster{ + input: &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ ServiceAccountName: "my-service-account", @@ -139,13 +139,13 @@ func TestGetHeadGroupServiceAccountName(t *testing.T) { want: "my-service-account", }, "Ray cluster without head group service account": { - input: &rayiov1alpha1.RayCluster{ + input: &rayv1alpha1.RayCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "raycluster-sample", Namespace: "default", }, - Spec: rayiov1alpha1.RayClusterSpec{ - HeadGroupSpec: rayiov1alpha1.HeadGroupSpec{ + Spec: rayv1alpha1.RayClusterSpec{ + HeadGroupSpec: rayv1alpha1.HeadGroupSpec{ Template: corev1.PodTemplateSpec{ Spec: 
 							Spec: corev1.PodSpec{},
 						},
@@ -348,7 +348,7 @@ func TestCalculateAvailableReplicas(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pod1",
 				Labels: map[string]string{
-					"ray.io/node-type": string(rayiov1alpha1.HeadNode),
+					"ray.io/node-type": string(rayv1alpha1.HeadNode),
 				},
 			},
 			Status: corev1.PodStatus{
@@ -359,7 +359,7 @@ func TestCalculateAvailableReplicas(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pod2",
 				Labels: map[string]string{
-					"ray.io/node-type": string(rayiov1alpha1.WorkerNode),
+					"ray.io/node-type": string(rayv1alpha1.WorkerNode),
 				},
 			},
 			Status: corev1.PodStatus{
@@ -370,7 +370,7 @@ func TestCalculateAvailableReplicas(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pod2",
 				Labels: map[string]string{
-					"ray.io/node-type": string(rayiov1alpha1.WorkerNode),
+					"ray.io/node-type": string(rayv1alpha1.WorkerNode),
 				},
 			},
 			Status: corev1.PodStatus{
@@ -381,7 +381,7 @@ func TestCalculateAvailableReplicas(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "pod2",
 				Labels: map[string]string{
-					"ray.io/node-type": string(rayiov1alpha1.WorkerNode),
+					"ray.io/node-type": string(rayv1alpha1.WorkerNode),
 				},
 			},
 			Status: corev1.PodStatus{