Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
92 changes: 92 additions & 0 deletions templates/http/base/deployment-model-server.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
# Deployment for the model server backing {{values.name}}.
# Rendered by the scaffolder: {{values.*}} are template substitutions,
# {%- if %} branches select between a vLLM (GPU) and a llama.cpp-style
# (CPU, bundled model file) server.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    # GitOps hooks: JSONPath locations the TAD tooling uses to read/update
    # the container image and the replica count.
    tad.gitops.set/image: ".spec.template.spec.containers[0].image"
    tad.gitops.get/image: ".spec.template.spec.containers[0].image"
    tad.gitops.set/replicas: ".spec.replicas"
    tad.gitops.get/replicas: ".spec.replicas"
  labels:
    # Templated values are quoted so an expansion that happens to look like
    # a number or boolean can never be coerced to a non-string type.
    app.kubernetes.io/instance: "{{values.name}}-model-server"
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/name: "{{values.name}}-model-server"
    app.kubernetes.io/part-of: "{{values.name}}"
  name: "{{values.name}}-model-server"
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: "{{values.name}}-model-server"
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: "{{values.name}}-model-server"
    spec:
      {%- if values.vllmSelected == nil or not(values.vllmSelected) %}
      # CPU path: copy the bundled model file into a shared emptyDir volume
      # before the server container starts.
      initContainers:
        - name: model-file
          image: "{{values.initContainer}}"
          command: ['/usr/bin/install', '/model/model.file', '/shared/']
          volumeMounts:
            - name: model-file
              mountPath: /shared
      {%- endif %}
      containers:
        {%- if values.vllmSelected %}
        # GPU path: vLLM serving the named model from a PVC-backed cache.
        - image: "{{values.vllmModelServiceContainer}}"
          args: [
            "--model",
            "{{values.vllmModelName}}",
            "--port",
            "{{values.modelServicePort}}",
            "--download-dir",
            "/models-cache",
            "--max-model-len",
            "{{values.maxModelLength}}"]
          resources:
            limits:
              nvidia.com/gpu: '1'
          volumeMounts:
            - name: dshm
              mountPath: /dev/shm
            - name: models-cache
              mountPath: /models-cache
        {%- else %}
        # CPU path: llama.cpp-style server reading the model file staged by
        # the init container.
        - env:
            - name: HOST
              value: "0.0.0.0"
            - name: PORT
              value: "{{values.modelServicePort}}"
            - name: MODEL_PATH
              value: /model/model.file
            - name: CHAT_FORMAT
              value: openchat
          image: "{{values.modelServiceContainer}}"
          volumeMounts:
            - name: model-file
              mountPath: /model
        {%- endif %}
          # Fields below are shared by both branches of the container entry.
          name: app-model-service
          ports:
            # Left unquoted: containerPort must render as an integer.
            - containerPort: {{values.modelServicePort}}
          securityContext:
            runAsNonRoot: true
      {%- if values.vllmSelected %}
      volumes:
        # /dev/shm backing for vLLM tensor-parallel shared memory.
        - name: dshm
          emptyDir:
            medium: Memory
            sizeLimit: "2Gi"
        - name: models-cache
          persistentVolumeClaim:
            claimName: "{{values.name}}"
      # Allow scheduling onto GPU nodes tainted with nvidia.com/gpu.
      tolerations:
        - key: nvidia.com/gpu
          operator: Exists
          effect: NoSchedule
      {%- else %}
      volumes:
        - name: model-file
          emptyDir: {}
      {%- endif %}
69 changes: 2 additions & 67 deletions templates/http/base/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,19 +22,10 @@ spec:
labels:
app.kubernetes.io/instance: {{values.name}}
spec:
{%- if values.vllmSelected == nil or not(values.vllmSelected) %}
initContainers:
- name: model-file
image: {{values.initContainer}}
command: ['/usr/bin/install', "/model/model.file", "/shared/"]
volumeMounts:
- name: model-file
mountPath: /shared
{%- endif %}
containers:
- env:
- name: MODEL_ENDPOINT
value: http://0.0.0.0:{{values.modelServicePort}}
value: http://{{values.name}}-model-server:{{values.modelServicePort}}
{%- if values.vllmSelected %}
- name: MODEL_NAME
value: "{{values.vllmModelName}}"
Expand All @@ -44,60 +35,4 @@ spec:
ports:
- containerPort: {{values.appPort}}
securityContext:
runAsNonRoot: true
{%- if values.vllmSelected %}
- image: {{values.vllmModelServiceContainer}}
args: [
"--model",
"{{values.vllmModelName}}",
"--port",
"{{values.modelServicePort}}",
"--download-dir",
"/models-cache",
"--max-model-len",
"{{values.maxModelLength}}"]
resources:
limits:
nvidia.com/gpu: '1'
volumeMounts:
- name: dshm
mountPath: /dev/shm
- name: models-cache
mountPath: /models-cache
{%- else %}
- env:
- name: HOST
value: "0.0.0.0"
- name: PORT
value: "{{values.modelServicePort}}"
- name: MODEL_PATH
value: /model/model.file
image: {{values.modelServiceContainer}}
volumeMounts:
- name: model-file
mountPath: /model
{%- endif %}
name: app-model-service
ports:
- containerPort: {{values.modelServicePort}}
securityContext:
runAsNonRoot: true
{%- if values.vllmSelected %}
volumes:
- name: dshm
emptyDir:
medium: Memory
sizeLimit: "2Gi"
- name: models-cache
persistentVolumeClaim:
claimName: {{values.name}}

tolerations:
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
{%- else %}
volumes:
- name: model-file
emptyDir: {}
{%- endif %}
runAsNonRoot: true
2 changes: 2 additions & 0 deletions templates/http/base/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ resources:
{%- if values.vllmSelected %}
- pvc.yaml
{%- endif %}
- deployment-model-server.yaml
- service-model-server.yaml
- deployment.yaml
- route.yaml
- service.yaml
15 changes: 15 additions & 0 deletions templates/http/base/service-model-server.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Service exposing the model-server Deployment inside the cluster; the app
# Deployment reaches it via http://{{values.name}}-model-server:<port>.
apiVersion: v1
kind: Service
metadata:
  labels:
    # Templated values are quoted so an expansion that happens to look like
    # a number or boolean can never be coerced to a non-string type.
    app.kubernetes.io/instance: "{{values.name}}-model-server"
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/name: "{{values.name}}-model-server"
  name: "{{values.name}}-model-server"
spec:
  ports:
    # Left unquoted: Service port/targetPort must render as integers.
    - port: {{values.modelServicePort}}
      protocol: TCP
      targetPort: {{values.modelServicePort}}
  selector:
    # Must match the pod-template label on the model-server Deployment.
    app.kubernetes.io/instance: "{{values.name}}-model-server"