# Default values for tidb-cluster.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Also see monitor.serviceAccount
# If you set rbac.create to false, you need to provide a value for monitor.serviceAccount
rbac:
create: true
# Set to true to enable cross-namespace monitoring
crossNamespace: false
# clusterName is the TiDB cluster name; if not specified, the chart release name will be used
# clusterName: demo
# Add extra labels to TidbCluster object
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
extraLabels: {}
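# A hypothetical example (label keys and values here are placeholders):
# extraLabels:
#   environment: staging
#   team: dba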
# schedulerName must be the same as charts/tidb-operator/values#scheduler.schedulerName
schedulerName: tidb-scheduler
# timezone is the default system timezone for TiDB
timezone: UTC
# reclaim policy of PVs, default: Retain.
# You must set it to Retain to ensure data safety in a production environment.
# https://pingcap.com/docs/stable/tidb-in-kubernetes/reference/configuration/storage-class/#data-safety
pvReclaimPolicy: Retain
# Whether to enable PV reclaim.
# When enabled, scaling in PD or TiKV will trigger PV reclaim automatically
enablePVReclaim: false
# services is the list of services to expose; the default type is ClusterIP
# can be ClusterIP | NodePort | LoadBalancer
services:
- name: pd
type: ClusterIP
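# For example, a sketch exposing PD via NodePort instead:
# - name: pd
#   type: NodePort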
discovery:
image: pingcap/tidb-operator:v1.1.4
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 250m
memory: 150Mi
requests:
cpu: 80m
memory: 50Mi
## affinity defines discovery pod scheduling rules; the default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# Whether to enable ConfigMap rollout management.
# When enabled, a change to the ConfigMap will trigger a graceful rolling update of the component.
# This feature is only available in tidb-operator v1.0 or higher.
# Note: toggling this variable on an existing cluster will cause a rolling update of each component even
# if the ConfigMap was not changed.
enableConfigMapRollout: true
# The scheduler achieves HA scheduling based on the topologyKey.
# You can change it to another node label
haTopologyKey: kubernetes.io/hostname
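# For example, to achieve HA across availability zones rather than hosts (assuming your nodes
# carry the well-known zone label; older clusters may use failure-domain.beta.kubernetes.io/zone):
# haTopologyKey: topology.kubernetes.io/zone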
# Whether to enable TLS connections between TiDB cluster components
tlsCluster:
# The steps to enable this feature:
# 1. Generate certificates for the TiDB cluster components and a client-side certificate for them.
# There are multiple ways to generate these certificates:
# - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/
# - certificates signed by the K8s built-in certificate signing system: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
# - or cert-manager signed certificates: https://cert-manager.io/ (a commented sketch follows after this section)
# 2. Create one secret object for one component which contains the certificates created above.
# The name of this Secret must be: <clusterName>-<componentName>-cluster-secret.
# For PD: kubectl create secret generic <clusterName>-pd-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# For TiKV: kubectl create secret generic <clusterName>-tikv-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# For TiDB: kubectl create secret generic <clusterName>-tidb-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# Same for other components.
# 3. Then create the TiDB cluster with `tlsCluster.enabled` set to `true`.
enabled: false
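# A minimal cert-manager sketch for the PD cluster certificate (assumes cert-manager is installed
# and an Issuer named "tidb-issuer" exists in <namespace>; all names here are placeholders, and
# the apiVersion may differ for your cert-manager release):
# apiVersion: cert-manager.io/v1
# kind: Certificate
# metadata:
#   name: <clusterName>-pd-cluster-secret
#   namespace: <namespace>
# spec:
#   secretName: <clusterName>-pd-cluster-secret
#   usages:
#     - server auth
#     - client auth
#   dnsNames:
#     - "<clusterName>-pd"
#     - "*.<clusterName>-pd-peer.<namespace>.svc"
#   issuerRef:
#     name: tidb-issuer
#     kind: Issuer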
pd:
# Please refer to https://github.com/pingcap/pd/blob/master/conf/config.toml for the default
# pd configuration (switch to the tag matching your pd version);
# just follow the format in that file and configure anything you want to customize
# in the 'config' section below.
# Please refer to https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file
# (choose the version matching your pd) for a detailed explanation of each parameter.
config: |
[log]
level = "info"
[replication]
location-labels = ["region", "zone", "rack", "host"]
# pd Service
# only clusterIP and loadBalancerIP can be specified for now
service: {}
# type: "<defaults to the global service type>"
# loadBalancerIP: "<if you set the service type to LoadBalancer, you can specify the loadBalancerIP>"
# clusterIP: "<if you set the service type to ClusterIP, you can specify the clusterIP>"
# annotations: {}
# portName: "client"  # if unset, the default name will be used
replicas: 3
image: pingcap/pd:v4.0.6
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, backup policies,
# or arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests:
# cpu: 4000m
# memory: 4Gi
storage: 1Gi
## affinity defines pd scheduling rules; the default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## The following is a typical example of affinity settings:
## The podAntiAffinity setting in the example keeps PD pods from co-locating on the same topology node as far as possible, to improve the disaster tolerance of PD on Kubernetes.
## The nodeAffinity setting in the example ensures that PD pods can only be scheduled to nodes with the label kind="pd".
# affinity:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# # this term works when the nodes have a label named region
# - weight: 10
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "region"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named zone
# - weight: 20
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "zone"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named rack
# - weight: 40
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "rack"
# namespaces:
# - <helm namespace>
# # this term works when the nodes have a label named kubernetes.io/hostname
# - weight: 80
# podAffinityTerm:
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: <release name>
# app.kubernetes.io/component: "pd"
# topologyKey: "kubernetes.io/hostname"
# namespaces:
# - <helm namespace>
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "kind"
# operator: In
# values:
# - "pd"
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
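# A hypothetical example, matching the kind="pd" node label used in the nodeAffinity example above:
# kind: pd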
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of PD Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
# sysctls:
# # You can tune these kernel parameters to improve TiDB performance,
# # when the kubelet is configured to allow unsafe sysctls
# - name: net.core.somaxconn
# value: "32768"
# Specify the priorityClassName for PD Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
tikv:
# Please refer to https://github.com/tikv/tikv/blob/master/etc/config-template.toml for the default
# tikv configuration (switch to the tag matching your tikv version);
# just follow the format in that file and configure anything you want to customize
# in the 'config' section below.
# Please refer to https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file
# (choose the version matching your tikv) for a detailed explanation of each parameter.
config: |
log-level = "info"
# # Here are some parameters you MUST customize (Please configure in the above `tikv.config` section):
#
# [readpool.coprocessor]
# # Normally these three parameters should be tuned to 80% of `tikv.resources.limits.cpu`, for example: 10000m -> 8
# high-concurrency = 8
# normal-concurrency = 8
# low-concurrency = 8
#
# # For TiKV v2.x:
# [rocksdb.defaultcf]
# ## The block cache is used to cache uncompressed blocks; a big block cache can speed up reads.
# ## In normal cases it should be tuned to 30%-50% of `tikv.resources.limits.memory`
# # block-cache-size = "1GB"
#
# [rocksdb.writecf]
# ## In normal cases it should be tuned to 10%-30% of `tikv.resources.limits.memory`
# # block-cache-size = "256MB"
#
# # From TiKV v3.0.0 on, you do not need to configure
# # [rocksdb.defaultcf].block-cache-size and [rocksdb.writecf].block-cache-size.
# # Instead, configure [storage.block-cache] as below:
# [storage.block-cache]
# shared = true
#
# # Normally it should be tuned to 30%-50% of `tikv.resources.limits.memory`, for example: 32Gi -> 16GB
# capacity = "1GB"
# Note that we can't set raftstore.capacity in the config because it would be overridden by the command-line parameter;
# the capacity can only be set via tikv.resources.limits.storage.
replicas: 3
image: pingcap/tikv:v4.0.6
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, backup policies,
# or arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 16000m
# memory: 32Gi
# storage: 300Gi # We can set capacity here.
requests:
# cpu: 12000m
# memory: 24Gi
storage: 10Gi
## affinity defines tikv scheduling rules; the default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of TiKV Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
# sysctls:
# # You can tune these kernel parameters to improve TiDB performance,
# # when the kubelet is configured to allow unsafe sysctls
# - name: net.core.somaxconn
# value: "32768"
# Specify the priorityClassName for TiKV Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
# When a TiKV node fails, its status turns to `Disconnected`. After 30 minutes, it turns to `Down`.
# After a further 5 minutes, if the TiKV node is still down, TiDB Operator creates a new TiKV node.
# maxFailoverCount configures the maximum number of TiKV nodes that TiDB Operator can create during failover.
maxFailoverCount: 3
# postArgScript is the script executed after the normal tikv instance start args are built;
# it is recommended to modify the args construction logic here if you have any special needs.
postArgScript: |
if [ ! -z "${STORE_LABELS:-}" ]; then
LABELS=" --labels ${STORE_LABELS} "
ARGS="${ARGS}${LABELS}"
fi
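# For example, if the operator injects STORE_LABELS="zone=cn-bj1-01,rack=r1" (hypothetical values),
# the script above appends " --labels zone=cn-bj1-01,rack=r1 " to the tikv start arguments.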
tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
# tidb configuration (switch to the tag matching your tidb version);
# just follow the format in that file and configure anything you want to customize
# in the 'config' section below.
# Please refer to https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file
# (choose the version matching your tidb) for a detailed explanation of each parameter.
config: |
[log]
level = "info"
# # Here are some parameters you MUST customize (Please configure in the above 'tidb.config' section):
# [performance]
# # Normally it should be tuned to `tidb.resources.limits.cpu`, for example: 16000m -> 16
# max-procs = 0
replicas: 2
# The secret name of the root password; you can create the secret with the following command:
# kubectl create secret generic tidb-secret --from-literal=root=<root-password> --namespace=<namespace>
# If unset, the root password will be empty and you can set it after connecting
# passwordSecretName: tidb-secret
# permitHost is the only host allowed to connect to TiDB.
# If unset, it defaults to '%', which allows any host to connect.
# permitHost: 127.0.0.1
# initSql contains SQL statements executed after the TiDB cluster is bootstrapped.
# The ConfigMap key (the file name) must be init-sql with no file extension; you can create the ConfigMap with the following command:
# kubectl create configmap tidb-initsql --from-file=init-sql=/path/to/your/init-sql --namespace=<namespace>
# If both initSqlConfigMapName and initSql are set, initSqlConfigMapName will be used.
# initSqlConfigMapName: tidb-initsql
# initSql: |-
# create database app;
image: pingcap/tidb:v4.0.6
# Image pull policy.
imagePullPolicy: IfNotPresent
resources:
limits: {}
# cpu: 16000m
# memory: 16Gi
requests: {}
# cpu: 12000m
# memory: 12Gi
## affinity defines tidb scheduling rules; the default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## If binlog.pump is enabled, the following affinity is recommended in production to deploy tidb and pump on the same node,
## so that binlog is not lost in tidb if a network partition occurs.
# podAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: "app.kubernetes.io/component"
# operator: In
# values:
# - "pump"
# - key: "app.kubernetes.io/managed-by"
# operator: In
# values:
# - "tidb-operator"
# - key: "app.kubernetes.io/name"
# operator: In
# values:
# - "tidb-cluster"
# - key: "app.kubernetes.io/instance"
# operator: In
# values:
# - <release-name>
# topologyKey: kubernetes.io/hostname
## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
annotations: {}
# Use the host's network namespace if enabled.
# Defaults to false.
hostNetwork: false
# Specify the security context of TiDB Pod.
# refer to https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
podSecurityContext: {}
# sysctls:
# # You can tune these kernel parameters to improve TiDB performance,
# # when the kubelet is configured to allow unsafe sysctls
# - name: net.core.somaxconn
# value: "32768"
# # Load balancers usually have an idle timeout (e.g. the AWS NLB idle timeout is 350 seconds);
# # tcp_keepalive_time must be set lower than the LB idle timeout.
# - name: net.ipv4.tcp_keepalive_time
# value: "300"
# - name: net.ipv4.tcp_keepalive_intvl
# value: "75"
# Specify the priorityClassName for TiDB Pod.
# refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#how-to-use-priority-and-preemption
priorityClassName: ""
maxFailoverCount: 3
service:
type: NodePort
exposeStatus: true
# portName: "mysql-client"
# annotations:
# cloud.google.com/load-balancer-type: Internal
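# A hypothetical AWS equivalent (the exact annotation value may vary by Kubernetes version):
#   service.beta.kubernetes.io/aws-load-balancer-internal: "true"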
separateSlowLog: true
slowLogTailer:
image: busybox:1.26.2
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 20m
memory: 5Mi
initializer:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
# tidb plugin configuration
plugin:
# whether to enable plugins
enable: false
# the start argument to specify the folder containing plugins
directory: /plugins
# the start argument to specify the plugin id (name "-" version) that needs to be loaded, e.g. 'conn_limit-1'.
list: ["allowlist-1"]
# Whether to enable TLS connections between the TiDB server and MySQL clients.
# https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
tlsClient:
# The steps to enable this feature:
# 1. Generate a TiDB server-side certificate and a client-side certificate for the TiDB cluster.
# There are multiple ways to generate certificates:
# - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
# - certificates signed by the K8s built-in certificate signing system: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
# - or use cert-manager signed certificates: https://cert-manager.io/
# 2. Create a K8s Secret object which contains the TiDB server-side certificate created above.
# The name of this Secret must be: <clusterName>-tidb-server-secret.
# kubectl create secret generic <clusterName>-tidb-server-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# 3. Create a K8s Secret object which contains the TiDB client-side certificate created above which will be used by TiDB Operator.
# The name of this Secret must be: <clusterName>-tidb-client-secret.
# kubectl create secret generic <clusterName>-tidb-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
# 4. Then create the TiDB cluster with `tlsClient.enabled` set to `true`.
enabled: false
# mysqlClient is used to set the password for TiDB;
# its image must have the Python MySQL client installed
mysqlClient:
image: tnir/mysqlclient
imagePullPolicy: IfNotPresent
# busybox configures the busybox image and pull policy.
# You may need to modify this if you want to use a customized image or an image
# from your local registry.
busybox:
image: busybox:1.31.1
imagePullPolicy: IfNotPresent
monitor:
create: true
# Also see rbac.create
# If you set rbac.create to false, you need to provide a value here.
# If you set rbac.create to true, you should leave this empty.
# serviceAccount:
persistent: false
storageClassName: local-storage
storage: 10Gi
initializer:
image: pingcap/tidb-monitor-initializer:v4.0.6
imagePullPolicy: IfNotPresent
config:
K8S_PROMETHEUS_URL: http://prometheus-k8s.monitoring.svc:9090
resources: {}
# limits:
# cpu: 50m
# memory: 64Mi
# requests:
# cpu: 50m
# memory: 64Mi
reloader:
create: true
image: pingcap/tidb-monitor-reloader:v1.0.1
imagePullPolicy: IfNotPresent
service:
type: NodePort
portName: tcp-reloader
resources: {}
# limits:
# cpu: 50m
# memory: 64Mi
# requests:
# cpu: 50m
# memory: 64Mi
grafana:
create: true
image: grafana/grafana:6.1.6
imagePullPolicy: IfNotPresent
logLevel: info
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
username: admin
password: admin
config:
# Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
# Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
GF_AUTH_ANONYMOUS_ENABLED: "true"
GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
# if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
# GF_SERVER_DOMAIN: foo.bar
# GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
service:
type: NodePort
portName: http-grafana
prometheus:
image: prom/prometheus:v2.18.1
imagePullPolicy: IfNotPresent
logLevel: info
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
service:
type: NodePort
portName: http-prometheus
reserveDays: 12
# alertmanagerURL: ""
# alertManagerRulesVersion is the version of the TiDB cluster used for alert rules.
# Defaults to the current TiDB cluster version, for example: v3.0.15
#
# alertManagerRulesVersion: ""
nodeSelector: {}
# kind: monitor
# zone: cn-bj1-01,cn-bj1-02
# region: cn-bj1
tolerations: []
# - key: node-role
# operator: Equal
# value: tidb
# effect: "NoSchedule"
binlog:
pump:
create: false
replicas: 1
image: pingcap/tidb-binlog:v4.0.6
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, backup policies,
# or arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 20Gi
# affinity for pump pod assignment, default: empty
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## The following setting is recommended so that pump instances are spread across the nodes,
## with at most one pump instance deployed per node.
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: "app.kubernetes.io/component"
# operator: In
# values:
# - "pump"
# - key: "app.kubernetes.io/managed-by"
# operator: In
# values:
# - "tidb-operator"
# - key: "app.kubernetes.io/name"
# operator: In
# values:
# - "tidb-cluster"
# - key: "app.kubernetes.io/instance"
# operator: In
# values:
# - <release-name>
# topologyKey: kubernetes.io/hostname
# tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
syncLog: true
# an integer value to control the expiry of the binlog data; it indicates how long (in days) the binlog data is stored.
# must be bigger than 0
gc: 7
# number of seconds between heartbeat ticks (default: 2)
heartbeatInterval: 2
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
# Please refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/pump/pump.toml for the default
# pump configuration (switch to the tag matching your pump version);
# just follow the format in that file and configure anything you want to customize
# in the 'config' section below.
# [security] section will be generated automatically if tlsCluster.enabled is set to true so users do not need to configure it.
# config: |
# gc = 7
# heartbeat-interval = 2
# [storage]
# sync-log = true
# stop-write-at-available-space = "10Gi"
drainer:
create: false
image: pingcap/tidb-binlog:v4.0.6
imagePullPolicy: IfNotPresent
logLevel: info
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, backup policies,
# or arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 10Gi
# affinity for drainer pod assignment, default: empty
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
# refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
# the concurrency of synchronization to the downstream; a bigger value gives
# better throughput (default: 16)
workerCount: 16
# the interval (in seconds) for detecting the pumps' status (default: 10)
detectInterval: 10
# disable causality detection
disableDetect: false
# disable dispatching SQL statements that are in the same binlog; if set to true, worker-count and txn-batch have no effect
disableDispatch: false
# do not sync these schemas
ignoreSchemas: "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test"
# if drainer doesn't have a checkpoint, use initialCommitTs as the initial checkpoint
initialCommitTs: 0
# enable safe mode to make syncer reentrant
safeMode: false
# the number of SQL statements of a transaction that are output to the downstream database (20 by default)
txnBatch: 20
# downstream storage, equal to --dest-db-type
# valid values are "mysql", "file", "tidb", "kafka"
destDBType: file
mysql: {}
# host: "127.0.0.1"
# user: "root"
# password: ""
# port: 3306
# # Time and size limits for flush batch writes
# timeLimit: "30s"
# sizeLimit: "100000"
kafka: {}
# only one of zookeeperAddrs and kafkaAddrs needs to be configured; the Kafka address will be discovered via ZooKeeper if zookeeperAddrs is configured.
# zookeeperAddrs: "127.0.0.1:2181"
# kafkaAddrs: "127.0.0.1:9092"
# kafkaVersion: "0.8.2.0"
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
# Please refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/drainer/drainer.toml for the default
# drainer configuration (switch to the tag matching your drainer version);
# just follow the format in that file and configure anything you want to customize
# in the 'config' section below.
# config: |
# detect-interval = 10
# [syncer]
# worker-count = 16
# disable-dispatch = false
# ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
# safe-mode = false
# txn-batch = 20
# db-type = "mysql"
# [syncer.to]
# # host = "127.0.0.1"
# # user = "root"
# # password = ""
# # port = 3306
scheduledBackup:
create: false
# https://github.com/pingcap/tidb-cloud-backup
mydumperImage: pingcap/tidb-cloud-backup:20200229
mydumperImagePullPolicy: IfNotPresent
# storageClassName references a StorageClass, which provides a way for administrators to describe the "classes" of storage they offer.
# Different classes might map to quality-of-service levels, backup policies,
# or arbitrary policies determined by the cluster administrators.
# refer to https://kubernetes.io/docs/concepts/storage/storage-classes
storageClassName: local-storage
storage: 100Gi
# When set to true, the backup data on local storage is cleaned up upon successful upload to the cloud object storage
cleanupAfterUpload: false
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule
schedule: "0 0 * * *"
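# For example, "30 2 * * 6" would run the backup at 02:30 every Saturday instead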
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#suspend
suspend: false
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#jobs-history-limits
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
# https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#starting-deadline
startingDeadlineSeconds: 3600
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
backoffLimit: 6
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-termination-and-cleanup
# activeDeadlineSeconds: 10800
# https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#handling-pod-and-container-failures
restartPolicy: OnFailure
# -t is the thread count; a larger thread count speeds up the backup but may impact the performance of the upstream TiDB.
# -r is the number of chunk rows for each parallel dump thread to process; this option was added in PingCAP's fork of mydumper.
# Other useful options are -B for database, and -T for tables.
# See https://github.com/maxbube/mydumper/blob/master/docs/mydumper_usage.rst#options for more options.
options: "-t 16 -r 10000 --skip-tz-utc --verbose=3"
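# A hypothetical example restricting the backup to one database and two tables:
# options: "-t 16 -r 10000 -B mydb -T table1,table2 --skip-tz-utc --verbose=3"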
# How long TiKV retains data for GC while the backup runs, in the format of a Go duration.
# When a GC happens, the current time minus this value is the safe point.
tikvGCLifeTime: 720h
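# For example, with tikvGCLifeTime: 720h (30 days), a GC at 2020-09-01 00:00 sets the safe point to 2020-08-02 00:00.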
# secretName is the name of the secret which stores the user and password used for backup
# Note: you must give the user enough privileges to perform the backup
# you can create the secret by:
# kubectl create secret generic backup-secret --from-literal=user=root --from-literal=password=<password>
secretName: backup-secret
# backup to gcp
gcp: {}
# bucket: ""
# prefix: ""
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
# https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually
# And then create the secret by: kubectl create secret generic gcp-backup-secret --from-file=./credentials.json
# secretName: gcp-backup-secret
# backup to ceph object storage
ceph: {}
# endpoint: ""
# bucket: ""
# secretName is the name of the secret which stores ceph object store access key and secret key
# You can create the secret by:
# kubectl create secret generic ceph-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# secretName: ceph-backup-secret
# backup to s3
s3: {}
# region: ""
# bucket: ""
# prefix: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# You can create the secret by:
# kubectl create secret generic s3-backup-secret --from-literal=access_key=<access-key> --from-literal=secret_key=<secret-key>
# secretName: s3-backup-secret
resources:
limits: {}
# cpu: 8000m
# memory: 8Gi
requests: {}
# cpu: 4000m
# memory: 4Gi
## affinity defines backup job scheduling rules; the default setting is empty.
## please read the affinity document before setting your scheduling rules:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
tolerations: []
importer:
create: false
image: pingcap/tidb-lightning:v4.0.6
imagePullPolicy: IfNotPresent
storageClassName: local-storage
storage: 200Gi
resources: {}
# limits:
# cpu: 16000m
# memory: 8Gi
# requests:
# cpu: 16000m
# memory: 8Gi
affinity: {}
tolerations: []
pushgatewayImage: prom/pushgateway:v0.3.1
pushgatewayImagePullPolicy: IfNotPresent
config: |
log-level = "info"
[metric]
job = "tikv-importer"
interval = "15s"
address = "localhost:9091"
metaInstance: "{{ $labels.instance }}"
metaType: "{{ $labels.type }}"
metaValue: "{{ $value }}"