From 0557904bcb4a534a9df621247db80bf3ff42e096 Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Thu, 14 May 2026 17:01:23 -0400 Subject: [PATCH 1/7] fix(scripts): add migrate subcommand to entrypoint.sh Allows running migrations standalone without booting the webapp server. Used by the umbrella chart's pre-install migration Job to avoid the chicken-and-egg of the Job inheriting webapp.extraEnvVars with SKIP_POSTGRES_MIGRATIONS=1. The migrate subcommand forces SKIP_POSTGRES_MIGRATIONS=0 because that's the Job's only purpose. Co-authored-by: Cursor --- docker/scripts/entrypoint.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docker/scripts/entrypoint.sh b/docker/scripts/entrypoint.sh index a6bc7dd15b9..6d6212a5d49 100755 --- a/docker/scripts/entrypoint.sh +++ b/docker/scripts/entrypoint.sh @@ -1,6 +1,11 @@ #!/bin/sh set -xe +if [ "$1" = "migrate" ]; then + echo "Running migrations only (entrypoint.sh migrate mode)" + SKIP_POSTGRES_MIGRATIONS=0 +fi + if [ -n "$DATABASE_HOST" ]; then scripts/wait-for-it.sh ${DATABASE_HOST} -- echo "database is up" fi @@ -39,6 +44,11 @@ else echo "CLICKHOUSE_URL not set, skipping ClickHouse migrations." fi +if [ "$1" = "migrate" ]; then + echo "Migrations complete, exiting." + exit 0 +fi + # Copy over required prisma files cp internal-packages/database/prisma/schema.prisma apps/webapp/prisma/ cp node_modules/@prisma/engines/*.node apps/webapp/prisma/ From ce5b58b8b1f930d1e11ec9eb982f649c0fcec12b Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Thu, 14 May 2026 17:07:33 -0400 Subject: [PATCH 2/7] fix(chart/webapp): gate OBJECT_STORE env block on s3.external.useIam Previously the block mounted s3-access-key-id / s3-secret-access-key from secrets.existingSecret unconditionally, breaking installs where the operator uses an existing Apollo-managed Secret that doesn't contain those keys. 
Now three branches: - s3.deploy: true -> existing behavior (chart-managed or s3.auth.existingSecret). - s3.deploy: false + useIam: false: - external.existingSecret -> mount from external secret. - chart-managed accessKeyId -> requires secrets.enabled: true so secrets.yaml actually emits the s3-access-key-id key. - s3.deploy: false + useIam: true -> skip the entire S3 env block. Co-authored-by: Cursor --- hosting/k8s/helm/templates/webapp.yaml | 23 ++++++++++++++++++++--- hosting/k8s/helm/values.yaml | 8 ++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/hosting/k8s/helm/templates/webapp.yaml b/hosting/k8s/helm/templates/webapp.yaml index 4346214e488..d257a996b08 100644 --- a/hosting/k8s/helm/templates/webapp.yaml +++ b/hosting/k8s/helm/templates/webapp.yaml @@ -303,6 +303,24 @@ spec: secretKeyRef: name: {{ include "trigger-v4.secretsName" . }} key: MANAGED_WORKER_SECRET + {{- end }} + {{- /* + Object-store credentials are scoped independently of the four + session secrets above. Three branches: + 1. s3.deploy → mount from the chart's internal MinIO + creds (s3.auth.existingSecret or the + chart-managed s3-auth-* keys). + 2. s3.external.useIam → skip entirely. IRSA / Workload + Identity provides credentials to the + SDK; mounting OBJECT_STORE_* would + force static-key mode. + 3. external static keys → s3.external.existingSecret, OR + s3.external.accessKeyId when the + chart is provisioning its own + secret (requires secrets.enabled + so secrets.yaml actually writes + s3-access-key-id). + */}} {{- if .Values.s3.deploy }} {{- if .Values.s3.auth.existingSecret }} - name: OBJECT_STORE_ACCESS_KEY_ID @@ -327,7 +345,7 @@ spec: name: {{ include "trigger-v4.secretsName" . 
}} key: s3-auth-secret-access-key {{- end }} - {{- else }} + {{- else if not .Values.s3.external.useIam }} {{- if .Values.s3.external.existingSecret }} - name: OBJECT_STORE_ACCESS_KEY_ID valueFrom: @@ -339,7 +357,7 @@ spec: secretKeyRef: name: {{ include "trigger-v4.s3.external.secretName" . }} key: {{ include "trigger-v4.s3.external.secretAccessKeyKey" . }} - {{- else if .Values.s3.external.accessKeyId }} + {{- else if and .Values.secrets.enabled .Values.s3.external.accessKeyId }} - name: OBJECT_STORE_ACCESS_KEY_ID valueFrom: secretKeyRef: @@ -352,7 +370,6 @@ spec: key: s3-secret-access-key {{- end }} {{- end }} - {{- end }} {{- if .Values.webapp.observability }} {{- if .Values.webapp.observability.tracing.exporterUrl }} - name: INTERNAL_OTEL_TRACE_EXPORTER_URL diff --git a/hosting/k8s/helm/values.yaml b/hosting/k8s/helm/values.yaml index 9f7fbbeb3e4..13eaaebcd28 100644 --- a/hosting/k8s/helm/values.yaml +++ b/hosting/k8s/helm/values.yaml @@ -756,6 +756,14 @@ s3: accessKeyId: "admin" # Default for internal MinIO - change for production secretAccessKey: "very-safe-password" # Default for internal MinIO - change for production # + # IAM-based authentication (for AWS S3 with IRSA / GCP Workload Identity). + # When true, the webapp pod skips the OBJECT_STORE_ACCESS_KEY_ID / + # OBJECT_STORE_SECRET_ACCESS_KEY env vars entirely so the AWS SDK can + # fall back to its credential-provider chain (instance metadata, + # web-identity-token file, etc.). Pair with a ServiceAccount + # `eks.amazonaws.com/role-arn` annotation on `webapp.serviceAccount`. 
+ useIam: false + # # Secure credential management existingSecret: "" # Name of existing secret containing S3 credentials existingSecretAccessKeyIdKey: "access-key-id" # Key in existing secret containing access key ID From a5e8ac508ef4b707efb1262346415d276aaa2534 Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Thu, 14 May 2026 17:07:50 -0400 Subject: [PATCH 3/7] fix(chart/electric): mount /app/persistent + accept extraVolumes Electric requires /app/persistent to be writable for its state dir. The template previously rendered no volumes at all, so every Electric pod crashed with 'could not make directory /app/persistent/state'. This commit always renders a /app/persistent emptyDir and adds extraVolumes/extraVolumeMounts knobs matching the webapp/supervisor pattern for consumers that want PVC-backed durability. Co-authored-by: Cursor --- hosting/k8s/helm/templates/electric.yaml | 12 ++++++++++++ hosting/k8s/helm/values.yaml | 17 +++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/hosting/k8s/helm/templates/electric.yaml b/hosting/k8s/helm/templates/electric.yaml index ac2d7582f26..6c7dbe9bbc9 100644 --- a/hosting/k8s/helm/templates/electric.yaml +++ b/hosting/k8s/helm/templates/electric.yaml @@ -89,6 +89,18 @@ spec: {{- end }} resources: {{- toYaml .Values.electric.resources | nindent 12 }} + volumeMounts: + - name: electric-persistent + mountPath: /app/persistent + {{- with .Values.electric.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: electric-persistent + emptyDir: {} + {{- with .Values.electric.extraVolumes }} + {{- toYaml . 
| nindent 8 }} + {{- end }} --- apiVersion: v1 kind: Service diff --git a/hosting/k8s/helm/values.yaml b/hosting/k8s/helm/values.yaml index 13eaaebcd28..c4a539c8f03 100644 --- a/hosting/k8s/helm/values.yaml +++ b/hosting/k8s/helm/values.yaml @@ -640,6 +640,23 @@ electric: # - name: CUSTOM_VAR # value: "custom-value" + # Extra volumes added to the Electric pod. The Electric container always + # has an emptyDir at /app/persistent regardless of this list — that's + # required by the Electric runtime. Use `extraVolumes` for things like + # an enterprise CA bundle ConfigMap. + extraVolumes: + [] + # - name: ca-bundle + # configMap: + # name: enterprise-ca-bundle + + # Extra volume mounts added to the Electric container. + extraVolumeMounts: + [] + # - name: ca-bundle + # mountPath: /etc/ssl/enterprise-ca + # readOnly: true + # ClickHouse configuration # Subchart: https://github.com/bitnami/charts/tree/main/bitnami/clickhouse clickhouse: From 8128b8e702f902e231c4d59eca37f2924425cefa Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Thu, 14 May 2026 17:08:26 -0400 Subject: [PATCH 4/7] fix(chart/clickhouse): respect auth.existingSecret in URL helper When clickhouse.deploy: true AND auth.existingSecret is set, the fork's clickhouse.url helper still interpolated values.auth.password literally, so webapp authenticated with a stale default while the Bitnami chart used the real password from the secret. This commit switches the deploy-mode URL to use a CLICKHOUSE_PASSWORD env-var indirection that resolves from existingSecret when set. 
Co-authored-by: Cursor --- hosting/k8s/helm/templates/_helpers.tpl | 11 +++++++---- hosting/k8s/helm/templates/webapp.yaml | 12 +++++++++++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/hosting/k8s/helm/templates/_helpers.tpl b/hosting/k8s/helm/templates/_helpers.tpl index 8615a34e0ae..da22edf65c7 100644 --- a/hosting/k8s/helm/templates/_helpers.tpl +++ b/hosting/k8s/helm/templates/_helpers.tpl @@ -401,11 +401,14 @@ ClickHouse hostname {{/* ClickHouse URL for application (with secure parameter) -Note on the external+existingSecret branch: the password is expanded via +Note on `$(CLICKHOUSE_PASSWORD)`: the password is expanded via Kubernetes' `$(VAR)` syntax, not shell `${VAR}`. Kubelet substitutes `$(CLICKHOUSE_PASSWORD)` at container-creation time from the CLICKHOUSE_PASSWORD env var declared just before CLICKHOUSE_URL in -webapp.yaml. Shell-style `${...}` does not work here because +webapp.yaml. Both the `deploy: true` and external+existingSecret +branches use this placeholder so that the chart never bakes the +password literal into a rendered URL string. Shell-style `${...}` +does not work here because `docker/scripts/entrypoint.sh` assigns CLICKHOUSE_URL to GOOSE_DBSTRING with a single-pass expansion (`export GOOSE_DBSTRING="$CLICKHOUSE_URL"`), so any inner `${...}` reaches goose verbatim and fails URL parsing. @@ -418,7 +421,7 @@ hex-encoded password or percent-encode before storing in the Secret. {{- if .Values.clickhouse.deploy -}} {{- $protocol := ternary "https" "http" .Values.clickhouse.secure -}} {{- $secure := ternary "true" "false" .Values.clickhouse.secure -}} -{{ $protocol }}://{{ .Values.clickhouse.auth.username }}:{{ .Values.clickhouse.auth.password }}@{{ include "trigger-v4.clickhouse.hostname" . }}:8123?secure={{ $secure }} +{{ $protocol }}://{{ .Values.clickhouse.auth.username }}:$(CLICKHOUSE_PASSWORD)@{{ include "trigger-v4.clickhouse.hostname" . 
}}:8123?secure={{ $secure }} {{- else if .Values.clickhouse.external.host -}} {{- $protocol := ternary "https" "http" .Values.clickhouse.external.secure -}} {{- $secure := ternary "true" "false" .Values.clickhouse.external.secure -}} @@ -439,7 +442,7 @@ applies to the replication URL. {{- define "trigger-v4.clickhouse.replication.url" -}} {{- if .Values.clickhouse.deploy -}} {{- $protocol := ternary "https" "http" .Values.clickhouse.secure -}} -{{ $protocol }}://{{ .Values.clickhouse.auth.username }}:{{ .Values.clickhouse.auth.password }}@{{ include "trigger-v4.clickhouse.hostname" . }}:8123 +{{ $protocol }}://{{ .Values.clickhouse.auth.username }}:$(CLICKHOUSE_PASSWORD)@{{ include "trigger-v4.clickhouse.hostname" . }}:8123 {{- else if .Values.clickhouse.external.host -}} {{- $protocol := ternary "https" "http" .Values.clickhouse.external.secure -}} {{- if .Values.clickhouse.external.existingSecret -}} diff --git a/hosting/k8s/helm/templates/webapp.yaml b/hosting/k8s/helm/templates/webapp.yaml index d257a996b08..61636c1ccc5 100644 --- a/hosting/k8s/helm/templates/webapp.yaml +++ b/hosting/k8s/helm/templates/webapp.yaml @@ -404,7 +404,17 @@ spec: - name: INTERNAL_OTEL_METRIC_EXPORTER_INTERVAL_MS value: {{ .Values.webapp.observability.metrics.exporterIntervalMs | quote }} {{- end }} - {{- if and .Values.clickhouse.external.host .Values.clickhouse.external.existingSecret }} + {{- if .Values.clickhouse.deploy }} + - name: CLICKHOUSE_PASSWORD + {{- if .Values.clickhouse.auth.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.clickhouse.auth.existingSecret }} + key: {{ .Values.clickhouse.auth.existingSecretKey | default "admin-password" }} + {{- else }} + value: {{ .Values.clickhouse.auth.password | quote }} + {{- end }} + {{- else if and .Values.clickhouse.external.host .Values.clickhouse.external.existingSecret }} - name: CLICKHOUSE_PASSWORD valueFrom: secretKeyRef: From 8b5734e38f9d17b9653f7d6dd642e605781d287f Mon Sep 17 00:00:00 2001 From: Conner 
<20548516+ConProgramming@users.noreply.github.com> Date: Thu, 14 May 2026 17:08:31 -0400 Subject: [PATCH 5/7] chore(chart): bump version to 4.4.5-plt663.1 Carries the four PLT-663 chart fixes above. Co-authored-by: Cursor --- hosting/k8s/helm/Chart.lock | 2 +- hosting/k8s/helm/Chart.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hosting/k8s/helm/Chart.lock b/hosting/k8s/helm/Chart.lock index ac445fac172..77c3de5f5a0 100644 --- a/hosting/k8s/helm/Chart.lock +++ b/hosting/k8s/helm/Chart.lock @@ -12,4 +12,4 @@ dependencies: repository: oci://registry-1.docker.io/bitnamicharts version: 17.0.9 digest: sha256:b6cef61abc0b8bcdf4e6d7d86bd8dd7999dd07543f5532f3d94797ffdf0ad30b -generated: "2025-06-27T19:27:24.075488134+01:00" +generated: "2026-05-14T16:37:57.189396-04:00" diff --git a/hosting/k8s/helm/Chart.yaml b/hosting/k8s/helm/Chart.yaml index e3b3c886456..34cba4ea256 100644 --- a/hosting/k8s/helm/Chart.yaml +++ b/hosting/k8s/helm/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: trigger description: The official Trigger.dev Helm chart type: application -version: 4.4.4 +version: 4.4.5-plt663.1 appVersion: v4.4.4 home: https://trigger.dev sources: From 60e3ebeabf9242a0a4465243d67de8c030f47825 Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Fri, 15 May 2026 15:51:34 -0400 Subject: [PATCH 6/7] fix(supervisor): type worker pod annotations as Record<string, string> @kubernetes/client-node@1.0.0 tightened V1ObjectMeta.annotations from 'unknown' to 'Record<string, string>'. The parsed JSON from the KUBERNETES_WORKER_POD_ANNOTATIONS env var lands as 'unknown' which now fails TS2322 at the assignment site. Cast (or validate) at the parse boundary.
Co-authored-by: Cursor --- apps/supervisor/src/workloadManager/kubernetes.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/apps/supervisor/src/workloadManager/kubernetes.ts b/apps/supervisor/src/workloadManager/kubernetes.ts index 986a885e4ad..4198c600414 100644 --- a/apps/supervisor/src/workloadManager/kubernetes.ts +++ b/apps/supervisor/src/workloadManager/kubernetes.ts @@ -118,7 +118,11 @@ export class KubernetesWorkloadManager implements WorkloadManager { "app.kubernetes.io/component": "create", }, ...(Object.keys(env.KUBERNETES_WORKER_POD_ANNOTATIONS).length > 0 - ? { annotations: { ...env.KUBERNETES_WORKER_POD_ANNOTATIONS } } + ? { + annotations: { + ...env.KUBERNETES_WORKER_POD_ANNOTATIONS, + } as Record<string, string>, + } : {}), }, spec: { From a63fe156d690235c1a09345f64d205a1ff8b5948 Mon Sep 17 00:00:00 2001 From: Conner <20548516+ConProgramming@users.noreply.github.com> Date: Fri, 15 May 2026 18:29:20 -0400 Subject: [PATCH 7/7] fix(supervisor): wire workerPodSecurityContext/etc env vars PR #12 added supervisor.config.kubernetes.workerPodSecurityContext, workerContainerSecurityContext, and workerPodAnnotations to values.yaml but the supervisor.yaml template never read them. The supervisor's Kubernetes workload manager reads KUBERNETES_WORKER_POD_SECURITY_CONTEXT, KUBERNETES_WORKER_CONTAINER_SECURITY_CONTEXT, and KUBERNETES_WORKER_POD_ANNOTATIONS env vars at runtime (JSON-parsed) and applies them to every worker pod it schedules. Without this wiring, worker pods on FedStart / GameWarden deployments are missing their compliance-required securityContext entries and would be rejected by pod-security admission.
Co-authored-by: Cursor --- hosting/k8s/helm/Chart.yaml | 2 +- hosting/k8s/helm/templates/supervisor.yaml | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hosting/k8s/helm/Chart.yaml b/hosting/k8s/helm/Chart.yaml index 34cba4ea256..9438e406e87 100644 --- a/hosting/k8s/helm/Chart.yaml +++ b/hosting/k8s/helm/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: trigger description: The official Trigger.dev Helm chart type: application -version: 4.4.5-plt663.1 +version: 4.4.5-plt663.2 appVersion: v4.4.4 home: https://trigger.dev sources: diff --git a/hosting/k8s/helm/templates/supervisor.yaml b/hosting/k8s/helm/templates/supervisor.yaml index 8f25216b11c..316df4da570 100644 --- a/hosting/k8s/helm/templates/supervisor.yaml +++ b/hosting/k8s/helm/templates/supervisor.yaml @@ -174,6 +174,18 @@ spec: value: {{ .Values.supervisor.config.kubernetes.workerServiceAccount | quote }} - name: KUBERNETES_WORKER_AUTOMOUNT_SERVICE_ACCOUNT_TOKEN value: {{ .Values.supervisor.config.kubernetes.workerAutomountServiceAccountToken | quote }} + {{- if .Values.supervisor.config.kubernetes.workerPodSecurityContext }} + - name: KUBERNETES_WORKER_POD_SECURITY_CONTEXT + value: {{ .Values.supervisor.config.kubernetes.workerPodSecurityContext | toJson | quote }} + {{- end }} + {{- if .Values.supervisor.config.kubernetes.workerContainerSecurityContext }} + - name: KUBERNETES_WORKER_CONTAINER_SECURITY_CONTEXT + value: {{ .Values.supervisor.config.kubernetes.workerContainerSecurityContext | toJson | quote }} + {{- end }} + {{- if .Values.supervisor.config.kubernetes.workerPodAnnotations }} + - name: KUBERNETES_WORKER_POD_ANNOTATIONS + value: {{ .Values.supervisor.config.kubernetes.workerPodAnnotations | toJson | quote }} + {{- end }} {{- $registryAuthEnabled := false }} {{- if .Values.registry.deploy }} {{- $registryAuthEnabled = .Values.registry.auth.enabled }}