-
Notifications
You must be signed in to change notification settings - Fork 2
/
values.yaml
301 lines (301 loc) · 8.19 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
replicas: 1
# Use this only if you want to replace the default that is .Chart.Name as the name of all the objects.
nameOverride: ""
image:
  repository: docker.io/flanksource/incident-commander
  pullPolicy: IfNotPresent
  tag: "v0.0.633"
# Values shared with the subcharts (canary-checker, config-db, flanksource-ui, kratos).
global:
  # -v, -vv, -vvv
  logLevel: ""
  imageRegistry: public.ecr.aws
  imagePrefix: k4y9r6y5
  labels: {}
  ui:
    host: "mission-control-ui.local"
    tlsSecretName: "mission-control-ui-tls"
  api:
    host: ""
    tlsSecretName: ""
  otel:
    collector: ""
    labels: ""
  db:
    connectionPooler:
      enabled: false
      secretKeyRef:
        name: mission-control-connection-pooler
        key: DB_URL
      serviceAccount:
        annotations: {}
      extraContainers: ""
      image: bitnami/pgbouncer:1.22.0
serviceAccount:
  name: mission-control-sa
  # Annotations to add to the service account
  annotations: {}
rbac:
  # Whether to create cluster-wide or namespaced roles
  clusterRole: true
  # for secret management with valueFrom
  tokenRequest: true
  secrets: true
  configmaps: true
  # for use with kubernetes resource lookups
  readAll: true
  # Playbook pod actions
  podRun: true
  # exec
  exec: true
# Extra command-line arguments passed to the incident-commander container.
extraArgs: {}
externalPostgrest:
  enable: true
  tag: v10.2.0
  # supabase/postgrest if registry is ecr and postgrest/postgrest if registry is docker.io
  imageName: supabase/postgrest
  logLevel: info
  # NOTE(review): key looks like a typo of "dbSchema", but chart templates read
  # this exact key — do not rename without updating the templates.
  dbScema: public
  dbAnonRole: postgrest_anon
  maxRows: 2000
# Specify the cel-go script or the file path to the cel script.
# Script is used to map the user identity to the role & teams.
identityRoleMapper:
  # specify the script inline
  script: ""
  # specify the script via a config map that'll be mounted to `mountPath`
  configMap:
    name: ""
    key: ""
    mountPath: "/etc/identity-role-mapper"
# Configuration for pushing data to upstream
# upstream_push:
#   name: ''
#   host: ''
#   user: ''
#   password: ''
#   labels: 'key1=val1,key2=val2'
upstream_push: {}
# Allowed values are [none, kratos, clerk]
authProvider: kratos
clerkJWKSURL: ""
clerkOrgID: ""
otel:
  # OpenTelemetry gRPC collector endpoint in host:port format
  collector: "{{.Values.global.otel.collector}}"
  serviceName: "mission-control"
  labels: "{{ .Values.global.otel.labels }}"
# Properties to configure mission-control feature sets
properties:
  incidents.disable: true
  logs.disable: true
# -v, -vv, -vvv
logLevel: "{{.Values.global.logLevel}}"
jsonLogs: true
db:
  create: true
  # Values rendered into postgresql.conf. Quote values that YAML 1.1 would
  # retype: `on`/`off` parse as booleans and `0644` parses as octal 420.
  conf:
    max_connections: 200
    shared_buffers: 1GB
    effective_cache_size: 3GB
    maintenance_work_mem: 256MB
    wal_buffers: 16MB
    effective_io_concurrency: 200
    work_mem: 10MB
    max_wal_size: 4GB
    log_autovacuum_min_duration: 0
    log_connections: "on"
    log_destination: "stderr"
    log_directory: "/var/log/postgresql"
    log_file_mode: "0644"
    log_filename: "postgresql.log"
    log_line_prefix: "%m [%p] %q[user=%u,db=%d,app=%a] "
    log_lock_waits: "on"
    log_min_duration_statement: "1s"
    log_rotation_age: 0
    log_rotation_size: 0
    log_statement: "all"
    log_temp_files: 0
    log_timezone: "UTC"
    logging_collector: "on"
    ssl: "off"
    timezone: "UTC"
    password_encryption: scram-sha-256
    db_user_namespace: "off"
    extra_float_digits: 0
  secretKeyRef:
    name: incident-commander-postgres
    key: DB_URL
  jwtSecretKeyRef:
    name: incident-commander-postgrest-jwt
    key: PGRST_JWT_SECRET
  storageClass: ''
  storage: 20Gi
  shmVolume: 256Mi
  resources:
    requests:
      memory: 4Gi
  pganalyze:
    enabled: false
    systemID: mission-control
    secretName: pganalyze
smtp:
  secretRef:
    name: incident-commander-smtp
  # Secret object should contain
  # SMTP_HOST: <host>
  # SMTP_PORT: <port>
  # SMTP_USER: <user>
  # SMTP_PASSWORD: <password>
adminPassword:
  secretKeyRef:
    # set to false if you want to pass in an existing secret
    create: true
    name: mission-control-admin-password
    key: password
# Sub-chart: canary-checker. DB is shared with incident-commander.
canary-checker:
  image:
    type: full
  disablePostgrest: true
  logLevel: "{{.Values.global.logLevel}}"
  otel:
    collector: "{{ .Values.global.otel.collector }}"
    labels: "{{ .Values.global.otel.labels }}"
  db:
    runMigrations: false
    external:
      enabled: true
      create: false
      secretKeyRef:
        name: incident-commander-postgres
        key: DB_URL
  flanksource-ui:
    # Disable UI via canary-checker by default.
    enabled: false
# Sub-chart: config-db. DB is shared with incident-commander.
config-db:
  disablePostgrest: true
  otel:
    collector: "{{ .Values.global.otel.collector }}"
    labels: "{{ .Values.global.otel.labels }}"
  logLevel: "{{.Values.global.logLevel}}"
  db:
    runMigrations: false
    embedded:
      persist: false
    external:
      enabled: true
      secretKeyRef:
        name: incident-commander-postgres
        key: DB_URL
apm-hub:
  enabled: false
  db:
    enabled: false
    secretKeyRef:
      create: false
      name: incident-commander-postgres
      key: DB_URL
# Enable ingress only if the UI is deployed outside of the cluster and calls public incident-commander api endpoint.
ingress:
  enabled: false
  annotations:
    kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  host: "{{.Values.global.api.host}}"
  tls:
    - hosts:
        - "{{.Values.global.api.host}}"
      secretName: "{{.Values.global.api.tlsSecretName}}"
# Sub-chart: flanksource-ui (the mission-control web UI).
flanksource-ui:
  enabled: true
  nameOverride: "incident-manager-ui"
  fullnameOverride: "incident-manager-ui"
  # This should be $host/api/.ory
  oryKratosURL: "http://{{.Values.global.ui.host}}/api/.ory"
  backendURL: "http://mission-control:8080"
  ingress:
    enabled: true
    host: "{{.Values.global.ui.host}}"
    tls:
      - hosts:
          - "{{.Values.global.ui.host}}"
        secretName: "{{.Values.global.ui.tlsSecretName}}"
# - if chart name (incident-commander) is changed, change the urls. E.g.
#   oryKratosURI url points to the incident-commander service with a suffix.
#
# - deletion of configmap and using our own secrets file etc. `make chart` will
#   extract kratos and delete configmap-config.yaml, so that we can explicitly -
#   set config manually. Allows for us to use templates for config and include -
#   identity-schema json file (encoding it as part of templating process).
#
# - Disable kratos secret. Again, we explicitly generate secrets.yaml to include
#   database URL in it.
kratos:
  enabled: true
  image:
    repository: public.ecr.aws/k4y9r6y5/kratos
  deployment:
    extraArgs:
      - --watch-courier
      - --config
      - /etc/custom/config/kratos.yaml
    extraVolumeMounts:
      - name: kratos-custom-config-volume
        mountPath: /etc/custom/config
        readOnly: true
    extraVolumes:
      - name: kratos-custom-config-volume
        configMap:
          name: mission-control-kratos-config
    automigration:
      customArgs:
        - "migrate"
        - "sql"
        - "-e"
        - "--yes"
        - "--config"
        - "/etc/custom/config/kratos.yaml"
  courier:
    enabled: false
  secret:
    # Disable kratos secret generation. We explicitly generate kratos secret to have access to database URL for dns key in secret. See: /chart/templates/secrets.yaml
    enabled: false
  # don't include Release.name in kratos name
  fullnameOverride: kratos
  ingress:
    public:
      enabled: false
  # NOTE(ciju): we have our own template for kratos-config. Which means kratos
  # templates doesn't have access to the config file, to generate hash from.
  # This could be fixed by explicitly passing hash value?
  configmap:
    hashSumEnabled: false
  kratos:
    automigration:
      enabled: true
      # Could be either job or initContainer. initContainer used here because
      # job runs as a pre-install,pre-update hook, but requires values from
      # secret, which are not yet install when the charts are being installed
      # for the first time.
      type: initContainer
    # Config to be merged in kratos ConfigMap. See: templates/kratos-config.yaml
    config:
      secrets:
        default:
          - yet another secret
          - lorem ipsum dolores
          - just a random a string secret
      courier:
        smtp:
          connection_uri: smtp://wrong-url
      session:
        # Session lifespan. Default: 14 days
        lifespan: 336h
      log:
        level: warning
# Resource requests/limits for the incident-commander container.
resources:
  requests:
    cpu: 100m
    memory: 768Mi
  limits:
    memory: 1024Mi
    cpu: 500m