
Commit b51df11

tedhtchang authored and openshift-ci[bot] committed
update aw examples for the kuberay integration doc
1 parent 31f13e4 commit b51df11

File tree: 3 files changed, +45 −53 lines changed


doc/usage/examples/kuberay/config/aw-raycluster-1.yaml

+22-26
```diff
@@ -1,4 +1,4 @@
-apiVersion: mcad.ibm.com/v1beta1
+apiVersion: workload.codeflare.dev/v1beta1
 kind: AppWrapper
 metadata:
   name: raycluster-complete-1
@@ -8,29 +8,26 @@ spec:
     GenericItems:
     - replicas: 1
       custompodresources:
-      # Optional section that specifies resource requirements
-      # for non-standard k8s resources, follows same format as
-      # that of standard k8s resources.
       # Each item in the custompodresources stanza should include resources consumed by target Item.
       # In this example, the 2 items correspond to 1 Ray head pod and 1 Ray worker pod
       - replicas: 1
         limits:
-          cpu: 2
-          memory: 8G
+          cpu: 1
+          memory: 2G
           nvidia.com/gpu: 0
         requests:
-          cpu: 2
-          memory: 8G
+          cpu: 1
+          memory: 2G
           nvidia.com/gpu: 0
       # The replica should match the number of worker pods
       - replicas: 1
         limits:
-          cpu: 8
-          memory: 8G
+          cpu: 2
+          memory: 2G
           nvidia.com/gpu: 0
         requests:
-          cpu: 8
-          memory: 8G
+          cpu: 2
+          memory: 2G
           nvidia.com/gpu: 0
       generictemplate:
         # The resource requests and limits in this config are too small for production!
@@ -65,7 +62,7 @@ spec:
         spec:
           containers:
           - name: ray-head
-            image: rayproject/ray:2.5.0
+            image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
             ports:
             - containerPort: 6379
               name: gcs
@@ -88,21 +85,21 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "2"
-                memory: "8G"
+                cpu: "1"
+                memory: "2G"
               requests:
                 # For production use-cases, we recommend specifying integer CPU reqests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
-                cpu: "2"
-                memory: "8G"
+                cpu: "1"
+                memory: "2G"
           volumes:
           - name: ray-logs
             emptyDir: {}
       workerGroupSpecs:
       # the pod replicas in this group typed worker
       - replicas: 1
         minReplicas: 1
-        maxReplicas: 10
+        maxReplicas: 1
         # logical group name, for this called small-group, also can be functional
         groupName: small-group
         # If worker pods need to be added, we can increment the replicas.
@@ -124,7 +121,7 @@ spec:
         spec:
           containers:
           - name: ray-worker
-            image: rayproject/ray:2.5.0
+            image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
             lifecycle:
               preStop:
                 exec:
@@ -142,19 +139,18 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "8"
-                memory: "8G"
-                # For production use-cases, we recommend specifying integer CPU reqests and limits.
+                cpu: "2"
+                memory: "2G"
+                # For production use-cases, we recommend specifying integer CPU requests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
               requests:
-                # For production use-cases, we recommend specifying integer CPU reqests and limits.
+                # For production use-cases, we recommend specifying integer CPU requests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
-                cpu: "8"
+                cpu: "2"
                 # For production use-cases, we recommend allocating at least 8Gb memory for each Ray container.
-                memory: "8G"
+                memory: "2G"
             # use volumes
             # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
             volumes:
             - name: ray-logs
               emptyDir: {}
-
```
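For orientation, here is a condensed sketch of roughly what the updated AppWrapper looks like once these hunks are applied. It is not the full committed file: the RayCluster template is heavily abbreviated, and fields the diff never touches (the `ray.io/v1alpha1` apiVersion, `headGroupSpec`, `rayStartParams`, service ports, volumes, lifecycle hooks) are assumed from the upstream KubeRay examples rather than taken from the hunks above.

```yaml
apiVersion: workload.codeflare.dev/v1beta1
kind: AppWrapper
metadata:
  name: raycluster-complete-1
spec:
  resources:
    GenericItems:
    - replicas: 1
      # One custompodresources entry per pod type: 1 Ray head pod, 1 Ray worker pod.
      custompodresources:
      - replicas: 1
        limits: {cpu: 1, memory: 2G, nvidia.com/gpu: 0}
        requests: {cpu: 1, memory: 2G, nvidia.com/gpu: 0}
      - replicas: 1
        limits: {cpu: 2, memory: 2G, nvidia.com/gpu: 0}
        requests: {cpu: 2, memory: 2G, nvidia.com/gpu: 0}
      # The wrapped RayCluster; abbreviated -- see the full file for rayStartParams,
      # ports, volumes, and lifecycle hooks.
      generictemplate:
        apiVersion: ray.io/v1alpha1   # assumed from KubeRay v0.6.x, not visible in the hunks above
        kind: RayCluster
        metadata:
          name: raycluster-complete-1
        spec:
          headGroupSpec:
            template:
              spec:
                containers:
                - name: ray-head
                  image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
                  resources:
                    limits: {cpu: "1", memory: "2G"}
                    requests: {cpu: "1", memory: "2G"}
          workerGroupSpecs:
          - replicas: 1
            minReplicas: 1
            maxReplicas: 1
            groupName: small-group
            template:
              spec:
                containers:
                - name: ray-worker
                  image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
                  resources:
                    limits: {cpu: "2", memory: "2G"}
                    requests: {cpu: "2", memory: "2G"}
```

The custompodresources stanza is what MCAD uses for its queuing decision, so it is meant to mirror the per-container requests and limits inside the RayCluster template, which is why this commit changes both in lockstep.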

doc/usage/examples/kuberay/config/aw-raycluster.yaml

+21-25
```diff
@@ -8,29 +8,26 @@ spec:
     GenericItems:
     - replicas: 1
       custompodresources:
-      # Optional section that specifies resource requirements
-      # for non-standard k8s resources, follows same format as
-      # that of standard k8s resources.
       # Each item in the custompodresources stanza should include resources consumed by target Item.
       # In this example, the 2 items correspond to 1 Ray head pod and 1 Ray worker pod
       - replicas: 1
         limits:
-          cpu: 2
-          memory: 8G
+          cpu: 1
+          memory: 2G
           nvidia.com/gpu: 0
         requests:
-          cpu: 2
-          memory: 8G
+          cpu: 1
+          memory: 2G
           nvidia.com/gpu: 0
       # The replica should match the number of worker pods
       - replicas: 1
         limits:
-          cpu: 8
-          memory: 8G
+          cpu: 2
+          memory: 2G
           nvidia.com/gpu: 0
         requests:
-          cpu: 8
-          memory: 8G
+          cpu: 2
+          memory: 2G
           nvidia.com/gpu: 0
       generictemplate:
         # The resource requests and limits in this config are too small for production!
@@ -65,7 +62,7 @@ spec:
         spec:
          containers:
           - name: ray-head
-            image: rayproject/ray:2.5.0
+            image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
             ports:
             - containerPort: 6379
               name: gcs
@@ -88,21 +85,21 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "2"
-                memory: "8G"
+                cpu: "1"
+                memory: "2G"
               requests:
                 # For production use-cases, we recommend specifying integer CPU reqests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
-                cpu: "2"
-                memory: "8G"
+                cpu: "1"
+                memory: "2G"
           volumes:
           - name: ray-logs
             emptyDir: {}
       workerGroupSpecs:
       # the pod replicas in this group typed worker
       - replicas: 1
         minReplicas: 1
-        maxReplicas: 10
+        maxReplicas: 1
         # logical group name, for this called small-group, also can be functional
         groupName: small-group
         # If worker pods need to be added, we can increment the replicas.
@@ -124,7 +121,7 @@ spec:
         spec:
           containers:
           - name: ray-worker
-            image: rayproject/ray:2.5.0
+            image: quay.io/project-codeflare/ray:2.5.0-py38-cu116
             lifecycle:
               preStop:
                 exec:
@@ -142,19 +139,18 @@ spec:
             # entire Kubernetes node on which it is scheduled.
             resources:
               limits:
-                cpu: "8"
-                memory: "8G"
-                # For production use-cases, we recommend specifying integer CPU reqests and limits.
+                cpu: "2"
+                memory: "2G"
+                # For production use-cases, we recommend specifying integer CPU requests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
               requests:
-                # For production use-cases, we recommend specifying integer CPU reqests and limits.
+                # For production use-cases, we recommend specifying integer CPU requests and limits.
                 # We also recommend setting requests equal to limits for both CPU and memory.
-                cpu: "8"
+                cpu: "2"
                 # For production use-cases, we recommend allocating at least 8Gb memory for each Ray container.
-                memory: "8G"
+                memory: "2G"
             # use volumes
             # Refer to https://kubernetes.io/docs/concepts/storage/volumes/
             volumes:
             - name: ray-logs
               emptyDir: {}
-
```

doc/usage/examples/kuberay/kuberay-mcad.md

+2-2
````diff
@@ -27,7 +27,7 @@ This integration will help in queuing on [kuberay](https://github.com/ray-projec
 Install kuberay operator using the [instructions](https://github.com/ray-project/kuberay#quick-start). For example, install kuberay v0.6.0 from remote helm repo:
 ```
 helm repo add kuberay https://ray-project.github.io/kuberay-helm/
-helm install kuberay-operator kuberay/kuberay-operator --version 0.6.0
+helm install kuberay-operator kuberay/kuberay-operator --set image.repository=quay.io/kuberay/operator --set image.tag=v0.6.0
 ```
 
 - OpenShift cluster:
@@ -38,7 +38,7 @@ This integration will help in queuing on [kuberay](https://github.com/ray-projec
 #### Steps
 
 
-- Submit the RayCluster custom resource to MCAD as AppWrapper using the [aw-raycluster.yaml](doc/usage/examples/kuberay/config/aw-raycluster.yaml) exmaple:
+- Submit the RayCluster custom resource to MCAD as AppWrapper using the [aw-raycluster.yaml](config/aw-raycluster.yaml) exmaple:
 ```bash
 kubectl create -f doc/usage/examples/kuberay/config/aw-raycluster.yaml
 ```
````
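Putting the two changed commands together, the documented flow after this commit is roughly the sketch below; the final `kubectl get` checks are illustrative additions for verifying the result and are not part of the committed doc.

```bash
# Install the KubeRay operator v0.6.0, pulling the image from quay.io (per the updated doc)
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm install kuberay-operator kuberay/kuberay-operator \
  --set image.repository=quay.io/kuberay/operator \
  --set image.tag=v0.6.0

# Submit the RayCluster to MCAD wrapped in an AppWrapper
kubectl create -f doc/usage/examples/kuberay/config/aw-raycluster.yaml

# Illustrative checks (not in the committed doc): confirm the AppWrapper was
# dispatched and the RayCluster pods came up
kubectl get appwrappers
kubectl get rayclusters
kubectl get pods
```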
