-
Notifications
You must be signed in to change notification settings - Fork 0
/
to_kafka.yaml
1060 lines (1025 loc) · 43.5 KB
/
to_kafka.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright VMware, Inc.
# SPDX-License-Identifier: Apache-2.0

## @section Global parameters
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
## @param global.imageRegistry Global Docker image registry
## @param global.imagePullSecrets Global Docker registry secret names as an array
## @param global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
  storageClass: ""
  ## Compatibility adaptations for Kubernetes platforms
  ##
  compatibility:
    ## Compatibility adaptations for Openshift
    ##
    openshift:
      ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation)
      ##
      adaptSecurityContext: disabled
## @section Common parameters

## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion: ""
## @param nameOverride String to partially override common.names.fullname template (will maintain the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname template
##
fullnameOverride: ""
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param clusterDomain Cluster Domain
##
clusterDomain: cluster.local
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []
## Enable diagnostic mode in the deployment
##
diagnosticMode:
  ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
  ##
  enabled: false
  ## @param diagnosticMode.command Command to override all containers in the deployment
  ##
  command:
    - sleep
  ## @param diagnosticMode.args Args to override all containers in the deployment
  ##
  args:
    - infinity
## @section Fluentd parameters

## Bitnami Fluentd image version
## ref: https://hub.docker.com/r/bitnami/fluentd/tags/
## @param image.registry [default: REGISTRY_NAME] Fluentd image registry
## @param image.repository [default: REPOSITORY_NAME/fluentd] Fluentd image repository
## @skip image.tag Fluentd image tag (immutable tags are recommended)
## @param image.pullPolicy Fluentd image pull policy
## @param image.pullSecrets Fluentd image pull secrets
## @param image.debug Enable image debug mode
##
image:
  registry: docker.io
  repository: bitnami/fluentd
  tag: 1.16.3-debian-12-r12
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## e.g:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Enable debug mode
  ##
  debug: false
## Forwarder parameters
## @param forwarder.enabled Enable the Fluentd forwarder daemonset (disabled here; only the aggregator is deployed)
##
forwarder:
  enabled: false
## Aggregator parameters
##
aggregator:
  ## @param aggregator.enabled Enable Fluentd aggregator statefulset
  ##
  enabled: true
  ## @param aggregator.image.registry [default: ""] Fluentd aggregator image registry override
  ## @param aggregator.image.repository [default: ""] Fluentd aggregator image repository override
  ## @skip aggregator.image.tag Fluentd aggregator image tag override (immutable tags are recommended)
  ##
  image:
    registry: ""
    repository: ""
    tag: ""
  ## @param aggregator.replicaCount Number of aggregator pods to deploy in the Stateful Set
  ##
  replicaCount: 1
  ## K8s Security Context for Aggregator pods
  ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ## @param aggregator.podSecurityContext.enabled Enable security context for aggregator pods
  ## @param aggregator.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy
  ## @param aggregator.podSecurityContext.sysctls Set kernel settings using the sysctl interface
  ## @param aggregator.podSecurityContext.supplementalGroups Set filesystem extra groups
  ## @param aggregator.podSecurityContext.fsGroup Group ID for aggregator's containers filesystem
  ##
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
  ## @param aggregator.automountServiceAccountToken Mount Service Account token in pod
  ##
  automountServiceAccountToken: false
  ## @param aggregator.hostAliases Add deployment host aliases
  ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
  ##
  hostAliases: []
  ## K8s Security Context for Aggregator containers
  ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ## @param aggregator.containerSecurityContext.enabled Enable security context for the aggregator container
  ## @param aggregator.containerSecurityContext.privileged Run as privileged
  ## @param aggregator.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container
  ## @param aggregator.containerSecurityContext.runAsUser User ID for aggregator's containers
  ## @param aggregator.containerSecurityContext.runAsGroup Group ID for aggregator's containers
  ## @param aggregator.containerSecurityContext.allowPrivilegeEscalation Allow Privilege Escalation
  ## @param aggregator.containerSecurityContext.readOnlyRootFilesystem Require the use of a read only root file system
  ## @param aggregator.containerSecurityContext.capabilities.drop [array] Drop capabilities for the securityContext
  ## @param aggregator.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
  ##
  containerSecurityContext:
    enabled: true
    privileged: false
    seLinuxOptions: null
    runAsUser: 1001
    runAsGroup: 1001
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: false
    capabilities:
      drop:
        - ALL
    seccompProfile:
      type: "RuntimeDefault"
  ## @param aggregator.terminationGracePeriodSeconds Duration in seconds the pod needs to terminate gracefully
  ## https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/
  ##
  terminationGracePeriodSeconds: 30
  ## @param aggregator.extraGems List of extra gems to be installed. Can be used to install additional fluentd plugins.
  ## e.g:
  ## extraGems:
  ##   - name: fluent-plugin-beats
  ##     version: 2.1.2
  ##
  extraGems: []
  ## @param aggregator.configFile Name of the config file that will be used by Fluentd at launch under the `/opt/bitnami/fluentd/conf` directory
  ##
  configFile: fluentd.conf
  ## @param aggregator.configMap Name of the config map that contains the Fluentd configuration files
  ##
  configMap: ""
## @param aggregator.configMapFiles [object] Files to be added to be config map. Ignored if `aggregator.configMap` is set
  ##
  configMapFiles:
    # The default conf file includes the settings required for the chart's basic features.
    # If you stop using the default conf file, find and disable the related features
    # elsewhere in this values file to avoid errors.
    # :9880  -> HTTP port exposed so the kubelet can run httpGet health probes.
    #           It echoes POSTed JSON to stdout (pod logs), so it also doubles as a test endpoint.
    # :24224 -> port that receives data from forwarders.
    # :24231 -> port exposed so an external Prometheus can pull fluentd metrics.
    fluentd.conf: |
      # Ignore fluentd own events
      <match fluent.**>
        @type null
      </match>
      @include kafka-produce.conf
      # @include kafka-consume.conf
      @include kafka-consume-group.conf
      @include fluentd-inputs.conf
      @include fluentd-output.conf
      {{- if .Values.metrics.enabled }}
      @include metrics.conf
      {{- end }}
    kafka-produce.conf: |
      <source>
        # in_http 플러그인: source섹션에서 tag지정 불가
        # URL subpath가 source tag로 취급됨
        # e.g) <fluentd_ip>:8888/kafka.produce
        @type http
        bind 0.0.0.0
        port 8888
        # format none # plaintext 처리 원할시 설정
      </source>
      <match kafka.produce>
        @type kafka2 # Produce plugin
        brokers test-kafka.default.svc.cluster.local:9092 # 콤마로 여러 주소 지정 가능(failure시 사용됨)
        default_topic fluentd-test # 멀티토픽 불가, tag routing으로 개별처리 필요
        # json만 받는다
        <format>
          @type json
        </format>
        # file 버퍼: 안정성, 확장성 중시 => 누락 원천차단, 근데 느림 (라이브시, pv 활성화 필요)
        # memory 버퍼: 속도 중시 => 애초에 버퍼를 많이 안쓰고 빠르게 처리되는게 바람직, 근데 누락위험
        <buffer fluentd-test>
          @type memory
          flush_mode interval
          flush_interval 2s
          flush_thread_count 4
          chunk_limit_size 2MB #as small as possible
          total_limit_size 4GB
          retry_max_interval 30
          retry_forever true
        </buffer>
      </match>
    kafka-consume-group.conf: |
      <source>
        @type kafka_group # Consume plugin
        brokers test-kafka.default.svc.cluster.local:9092
        consumer_group fltd-consumer-group
        topics /fluentd-.*/ # 멀티토픽 콤마 구분 지원, regex 가능!!
        format json
        # false면 최신부터 읽음. 첫 실행시에만 중요.
        # 재기동할 땐 consumer-group의 offset을 따르기 때문에 이 설정값과 상관없이 안읽은부분부터 읽음
        start_from_beginning true # default true
      </source>
      <match kafka.fluentd-test>
        @type stdout
      </match>
    kafka-consume.conf: |
      <source>
        @type kafka # Consume plugin(Non-consumer-group) # 비권장
        # consumer-group을 생성하지 않는 단일 consumer
        # 단일 partition만 접근가능. default 0번
        # broker에 __consumer_offset이 기록되지 않음, 중복 및 누락 위험성
        # 실행시 대상 partition에서 읽기 시작할 offset을 수동지정가능, default -1(현재 최신 Record부터 가져옴)
        # tag는 토픽명을 따름
        brokers test-kafka.default.svc.cluster.local:9092
        format json
        topics fluentd-test # 콤마 구분 멀티토픽지원, regex 불가
        # topic 별 처리하기
        # <topic>
        #   topic <listening topic>
        #   partition <listening partition: default=0>
        #   offset <listening start offset: default=-1>
        # </topic>
        # <topic>
        #   topic <listening topic>
        #   partition <listening partition: default=0>
        #   offset <listening start offset: default=-1>
        # </topic>
      </source>
      <match kafka.fluentd-test>
        @type stdout
      </match>
    fluentd-inputs.conf: |
      # TCP input to receive logs from
      {{- if .Values.aggregator.port }}
      <source>
        @type forward
        bind 0.0.0.0
        port {{ .Values.aggregator.port }}
        {{- if .Values.tls.enabled }}
        <transport tls>
          ca_path /opt/bitnami/fluentd/certs/in_forward/ca.crt
          cert_path /opt/bitnami/fluentd/certs/in_forward/tls.crt
          private_key_path /opt/bitnami/fluentd/certs/in_forward/tls.key
          client_cert_auth true
        </transport>
        {{- end }}
      </source>
      {{- end }}
      # HTTP input for the liveness and readiness probes
      <source>
        @type http
        bind 0.0.0.0
        port 9880
      </source>
    fluentd-output.conf: |
      # Throw the healthcheck to the standard output
      <match fluentd.healthcheck>
        @type stdout
      </match>
      # Send the logs to the standard output
      <match **>
        @type stdout
      </match>
    metrics.conf: |
      # Prometheus Exporter Plugin
      # input plugin that exports metrics
      <source>
        @type prometheus
        port {{ .Values.metrics.service.port }}
      </source>
      # input plugin that collects metrics from MonitorAgent
      <source>
        @type prometheus_monitor
        <labels>
          host ${hostname}
        </labels>
      </source>
      # input plugin that collects metrics for output plugin
      <source>
        @type prometheus_output_monitor
        <labels>
          host ${hostname}
        </labels>
      </source>
## @param aggregator.port Port the Aggregator container will listen for logs. Leave it blank to ignore.
  ## You can specify other ports in the aggregator.containerPorts parameter
  ##
  port: 24224
  ## @param aggregator.extraArgs Extra arguments for the Fluentd command line
  ## ref: https://docs.fluentd.org/deployment/command-line-option
  ##
  extraArgs: ""
  ## @param aggregator.extraEnvVars Extra environment variables to pass to the container
  ## e.g:
  ## extraEnvVars:
  ##   - name: MY_ENV_VAR
  ##     value: my_value
  ##
  extraEnvVars: []
  ## @param aggregator.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Fluentd Aggregator nodes
  ##
  extraEnvVarsCM: ""
  ## @param aggregator.extraEnvVarsSecret Name of existing Secret containing extra env vars for Fluentd Aggregator nodes
  ##
  extraEnvVarsSecret: ""
  ## @param aggregator.containerPorts [array] Ports the aggregator containers will listen on
  ## e.g:
  ## - name: my-port
  ##   containerPort: 24222
  ##   protocol: TCP
  ##
  containerPorts:
    # kubelet probes / JSON echo test endpoint (see configMapFiles comments)
    - name: http
      containerPort: 9880
      protocol: TCP
    # in_http source used to produce test records to Kafka (kafka-produce.conf)
    - name: test
      containerPort: 8888
      protocol: TCP
## Service parameters
  ##
  service:
    ## @param aggregator.service.type Kubernetes service type (`ClusterIP`, `NodePort`, or `LoadBalancer`) for the aggregators
    ##
    type: LoadBalancer
    ## @param aggregator.service.ports [object] Array containing the aggregator service ports
    ##
    ports:
      http:
        port: 9880
        targetPort: http
        protocol: TCP
      tcp:
        port: 24224
        targetPort: tcp
        protocol: TCP
      test:
        port: 8888
        targetPort: test
        protocol: TCP
    ## @param aggregator.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific)
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
    ##
    loadBalancerIP: ""
    ## @param aggregator.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer
    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
    ## e.g:
    ## loadBalancerSourceRanges:
    ##   - 10.10.10.0/24
    ##
    loadBalancerSourceRanges: []
    ## @param aggregator.service.clusterIP Static clusterIP or None for headless services
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
    ## e.g:
    ## clusterIP: None
    ##
    clusterIP: ""
    ## @param aggregator.service.annotations Provide any additional annotations which may be required
    ##
    annotations: {}
    ## @param aggregator.service.externalTrafficPolicy Fluentd Aggregator service external traffic policy
    ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
    ##
    externalTrafficPolicy: Cluster
    ## @param aggregator.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
    ## If "ClientIP", consecutive client requests will be directed to the same Pod
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
    ##
    sessionAffinity: None
    ## @param aggregator.service.sessionAffinityConfig Additional settings for the sessionAffinity
    ## e.g:
    ## sessionAffinityConfig:
    ##   clientIP:
    ##     timeoutSeconds: 300
    ##
    sessionAffinityConfig: {}
    ## @param aggregator.service.annotationsHeadless Provide any additional annotations which may be required on headless service
    ##
    annotationsHeadless: {}
    ## Headless service properties
    ##
    headless:
      ## @param aggregator.service.headless.annotations Annotations for the headless service.
      ##
      annotations: {}
## Network Policies
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
  ##
  networkPolicy:
    ## @param aggregator.networkPolicy.enabled Specifies whether a NetworkPolicy should be created
    ##
    enabled: true
    ## @param aggregator.networkPolicy.allowExternal Don't require server label for connections
    ## The Policy model to apply. When set to false, only pods with the correct
    ## server label will have network access to the ports server is listening
    ## on. When true, server will accept connections from any source
    ## (with the correct destination port).
    ##
    allowExternal: true
    ## @param aggregator.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations.
    ##
    allowExternalEgress: true
    ## @param aggregator.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy
    ## e.g:
    ## extraIngress:
    ##   - ports:
    ##       - port: 1234
    ##     from:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraIngress: []
    ## @param aggregator.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy
    ## e.g:
    ## extraEgress:
    ##   - ports:
    ##       - port: 1234
    ##     to:
    ##       - podSelector:
    ##           - matchLabels:
    ##               - role: frontend
    ##       - podSelector:
    ##           - matchExpressions:
    ##               - key: role
    ##                 operator: In
    ##                 values:
    ##                   - frontend
    ##
    extraEgress: []
    ## @param aggregator.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces
    ## @param aggregator.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces
    ##
    ingressNSMatchLabels: {}
    ingressNSPodMatchLabels: {}
## Configure the ingress resource that allows you to access the
  ## Fluentd aggregator. Set up the URL
  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
  ##
  ingress:
    ## @param aggregator.ingress.enabled Set to true to enable ingress record generation
    ##
    enabled: false
    ## DEPRECATED: Use ingress.annotations instead of ingress.certManager
    ## certManager: false
    ##
    ## @param aggregator.ingress.pathType Ingress Path type. How the path matching is interpreted
    ##
    pathType: ImplementationSpecific
    ## @param aggregator.ingress.apiVersion Override API Version (automatically detected if not set)
    ##
    apiVersion: ""
    ## @param aggregator.ingress.hostname Default host for the ingress resource
    ##
    hostname: fluentd.local
    ## @param aggregator.ingress.path Default path for the ingress resource
    ## You may need to set this to '/*' in order to use this with ALB ingress controllers.
    ##
    path: /
    ## @param aggregator.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
    ## For a full list of possible ingress annotations, please see
    ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md
    ## Use this parameter to set the required annotations for cert-manager, see
    ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
    ##
    ## e.g:
    ## annotations:
    ##   kubernetes.io/ingress.class: nginx
    ##   cert-manager.io/cluster-issuer: cluster-issuer-name
    ##
    annotations: {}
    ## @param aggregator.ingress.tls Enable TLS configuration for the hostname defined at ingress.hostname parameter
    ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.aggregator.ingress.hostname | trunc 63 | trimSuffix "-" }}
    ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
    ##
    tls: false
    ## @param aggregator.ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
    ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
    ## e.g:
    ## extraHosts:
    ##   - name: fluentd.local
    ##     path: /
    ##
    extraHosts: []
    ## @param aggregator.ingress.extraPaths Any additional arbitrary paths that may need to be added to the ingress under the main host.
    ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
    ## e.g:
    ## extraPaths:
    ##   - path: /*
    ##     backend:
    ##       serviceName: ssl-redirect
    ##       servicePort: use-annotation
    ##
    extraPaths: []
    ## @param aggregator.ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
    ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
    ## e.g:
    ## extraTls:
    ##   - hosts:
    ##       - fluentd.local
    ##     secretName: fluentd.local-tls
    ##
    extraTls: []
    ## @param aggregator.ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
    ## key and certificate should start with -----BEGIN CERTIFICATE----- or
    ## -----BEGIN RSA PRIVATE KEY-----
    ##
    ## name should line up with a tlsSecret set further up
    ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
    ##
    ## It is also possible to create and manage the certificates outside of this helm chart
    ## Please see README.md for more information
    ## e.g:
    ## secrets:
    ##   - name: fluentd.local-tls
    ##     key:
    ##     certificate:
    ##
    secrets: []
    ## @param aggregator.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
    ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
    ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
    ##
    ingressClassName: ""
    ## @param aggregator.ingress.extraRules Additional rules to be covered with this ingress record
    ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
    ## e.g:
    ## extraRules:
    ##   - host: example.local
    ##     http:
    ##       path: /
    ##       backend:
    ##         service:
    ##           name: example-svc
    ##           port:
    ##             name: http
    ##
    extraRules: []
## Configure extra options for startup probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param aggregator.startupProbe.enabled Enable startupProbe
  ## @param aggregator.startupProbe.httpGet.path Request path for startupProbe
  ## @param aggregator.startupProbe.httpGet.port Port for startupProbe
  ## @param aggregator.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
  ## @param aggregator.startupProbe.periodSeconds Period seconds for startupProbe
  ## @param aggregator.startupProbe.timeoutSeconds Timeout seconds for startupProbe
  ## @param aggregator.startupProbe.failureThreshold Failure threshold for startupProbe
  ## @param aggregator.startupProbe.successThreshold Success threshold for startupProbe
  ##
  startupProbe:
    enabled: false
    httpGet:
      # URL-encoded {"ping": "pong"} JSON payload for the in_http healthcheck endpoint
      path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
      port: http
    initialDelaySeconds: 60
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for liveness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param aggregator.livenessProbe.enabled Enable livenessProbe
  ## @param aggregator.livenessProbe.httpGet.path Request path for livenessProbe
  ## @param aggregator.livenessProbe.httpGet.port Port for livenessProbe
  ## @param aggregator.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
  ## @param aggregator.livenessProbe.periodSeconds Period seconds for livenessProbe
  ## @param aggregator.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
  ## @param aggregator.livenessProbe.failureThreshold Failure threshold for livenessProbe
  ## @param aggregator.livenessProbe.successThreshold Success threshold for livenessProbe
  ##
  livenessProbe:
    enabled: true
    httpGet:
      path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
      port: http
    initialDelaySeconds: 60
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## Configure extra options for readiness probe
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
  ## @param aggregator.readinessProbe.enabled Enable readinessProbe
  ## @param aggregator.readinessProbe.httpGet.path Request path for readinessProbe
  ## @param aggregator.readinessProbe.httpGet.port Port for readinessProbe
  ## @param aggregator.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
  ## @param aggregator.readinessProbe.periodSeconds Period seconds for readinessProbe
  ## @param aggregator.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
  ## @param aggregator.readinessProbe.failureThreshold Failure threshold for readinessProbe
  ## @param aggregator.readinessProbe.successThreshold Success threshold for readinessProbe
  ##
  readinessProbe:
    enabled: false
    httpGet:
      path: /fluentd.healthcheck?json=%7B%22ping%22%3A+%22pong%22%7D
      port: http
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
  ## @param aggregator.customStartupProbe Custom startup probe for the Fluentd Aggregator
  ##
  customStartupProbe: {}
  ## @param aggregator.customLivenessProbe Custom liveness probe for the Fluentd Aggregator
  ##
  customLivenessProbe: {}
  ## @param aggregator.customReadinessProbe Custom readiness probe for the Fluentd Aggregator
  ##
  customReadinessProbe: {}
  ## @param aggregator.updateStrategy.type Set up update strategy.
  ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
  ## Example:
  ## updateStrategy:
  ##   type: RollingUpdate
  ##   rollingUpdate:
  ##     maxSurge: 25%
  ##     maxUnavailable: 25%
  ##
  updateStrategy:
    type: RollingUpdate
## Aggregator containers' resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
  ## We usually recommend not to specify default resources and to leave this as a conscious
  ## choice for the user. This also increases chances charts run on environments with little
  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  ## @param aggregator.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if aggregator.resources is set (aggregator.resources is recommended for production).
  ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15
  ##
  resourcesPreset: "none"
  ## @param aggregator.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads)
  ## Example:
  ## resources:
  ##   requests:
  ##     cpu: 2
  ##     memory: 512Mi
  ##   limits:
  ##     cpu: 3
  ##     memory: 1024Mi
  ##
  resources: {}
  ## @param aggregator.priorityClassName Fluentd Aggregator pods' priorityClassName
  ##
  priorityClassName: ""
  ## @param aggregator.schedulerName Name of the k8s scheduler (other than default)
  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
  ##
  schedulerName: ""
  ## @param aggregator.topologySpreadConstraints Topology Spread Constraints for pod assignment
  ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  ## The value is evaluated as a template
  ##
  topologySpreadConstraints: []
  ## @param aggregator.podManagementPolicy podManagementPolicy to manage scaling operation of Fluentd Aggregator pods
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
  ##
  podManagementPolicy: ""
  ## @param aggregator.podAffinityPreset Aggregator Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAffinityPreset: ""
  ## @param aggregator.podAntiAffinityPreset Aggregator Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
  ##
  podAntiAffinityPreset: soft
  ## Node affinity preset
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
  ##
  nodeAffinityPreset:
    ## @param aggregator.nodeAffinityPreset.type Aggregator Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
    ##
    type: ""
    ## @param aggregator.nodeAffinityPreset.key Aggregator Node label key to match Ignored if `affinity` is set.
    ##
    key: ""
    ## @param aggregator.nodeAffinityPreset.values Aggregator Node label values to match. Ignored if `affinity` is set.
    ## E.g.
    ## values:
    ##   - e2e-az1
    ##   - e2e-az2
    ##
    values: []
  ## @param aggregator.affinity Aggregator Affinity for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
  ##
  affinity: {}
  ## @param aggregator.nodeSelector Aggregator Node labels for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
  ##
  nodeSelector: {}
  ## @param aggregator.tolerations Aggregator Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  ##
  tolerations: []
  ## @param aggregator.podAnnotations Pod annotations
  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
  ##
  podAnnotations: {}
  ## @param aggregator.podLabels Extra labels to add to Pod
  ##
  podLabels: {}
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
## @param aggregator.serviceAccount.create Specify whether a ServiceAccount should be created
##
create: true
## @param aggregator.serviceAccount.name The name of the ServiceAccount to create
## If not set and create is true, a name is generated using the common.names.fullname template
name: ""
## @param aggregator.serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
##
annotations: {}
## @param aggregator.serviceAccount.automountServiceAccountToken Automount service account token for the server service account
##
automountServiceAccountToken: false
## Autoscaling parameters
## This is not recommended in a forwarder+aggregator architecture
## @param aggregator.autoscaling.enabled Create an Horizontal Pod Autoscaler
## @param aggregator.autoscaling.minReplicas Minimum number of replicas for the HPA
## @param aggregator.autoscaling.maxReplicas Maximum number of replicas for the HPA
## @param aggregator.autoscaling.metrics [array] Metrics for the HPA to manage the scaling
##
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 60
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 60
## Persist data to a persistent volume
##
persistence:
## @param aggregator.persistence.enabled Enable persistence volume for the aggregator
##
enabled: false
## @param aggregator.persistence.storageClass Persistent Volume storage class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
## @param aggregator.persistence.accessModes Persistent Volume access modes
##
accessModes:
- ReadWriteOnce
## @param aggregator.persistence.size Persistent Volume size
##
size: 10Gi
## @param aggregator.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template)
## selector:
## matchLabels:
## app: my-app
selector: {}
## @param aggregator.persistence.annotations Persistent Volume Claim annotations
##
annotations: {}
## @param aggregator.command Override default container command (useful when using custom images)
##
command: []
## @param aggregator.args Override default container args (useful when using custom images)
##
args: []
## @param aggregator.lifecycleHooks Additional lifecycles to add to the pods
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
## e.g:
## postStart:
## exec:
## command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
## preStop:
## exec:
## command: ["/bin/sh","-c","nginx -s quit; while killall -0 nginx; do sleep 1; done"]
##
lifecycleHooks: {}
## @param aggregator.sidecars Add sidecars to aggregator pods
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
##
sidecars: []
## @param aggregator.initContainers Add init containers to aggregator pods
initContainers: # []
  - name: install-offline-plugins
    image: '{{ include "fluentd.aggregator.image" . }}' # same image as the aggregator (rendered as a template)
    imagePullPolicy: IfNotPresent
    # Run as root so fluent-gem can write into the image's gem directories
    # (avoids "Permission denied" during installation)
    securityContext:
      allowPrivilegeEscalation: false
      runAsUser: 0
    command: [sh, -c]
    args:
      - |-
        # install the offline .gem files inside the init container
        fluent-gem install /plugins/*.gem
        # copy the installed gems into the shared dir consumed by the app container
        cp -r /opt/bitnami/fluentd/extensions/* /extensions/
        cp -r /opt/bitnami/fluentd/gems/* /gems/
        cp -r /opt/bitnami/fluentd/specifications/* /specifications
    # Volumes mounted into the init container
    volumeMounts:
      # host-local .gem files exposed to the init container
      - name: local-gems
        mountPath: /plugins # path inside the init container
      # volume shared between the app container and the init container
      - name: shared-dir
        mountPath: /extensions # path inside the init container; must match the cp target above
        subPath: extensions # sub-path inside the shared volume; must match the app container mount below
      - name: shared-dir
        mountPath: /gems # path inside the init container
        subPath: gems # sub-path inside the shared volume
      - name: shared-dir
        mountPath: /specifications # path inside the init container
        subPath: specifications # sub-path inside the shared volume
# Definitions of additional volumes
extraVolumes: # []
  # volume shared between the host (node) and the init container
  - name: local-gems
    hostPath:
      type: Directory
      path: /etc/plugins # path on the host
  # volume shared between the app container and the init container
  - name: shared-dir
    emptyDir: {}
# Mount the volumes defined above into the app container
extraVolumeMounts: # []
  - name: shared-dir
    mountPath: /opt/bitnami/fluentd/extensions/ # path inside the app container
    subPath: extensions
  - name: shared-dir
    mountPath: /opt/bitnami/fluentd/gems/ # path inside the app container
    subPath: gems
  - name: shared-dir
    mountPath: /opt/bitnami/fluentd/specifications/ # path inside the app container
    subPath: specifications
## @param aggregator.extraVolumeClaimTemplates Optionally specify extra list of additional volume claim templates for the Fluentd Aggregator pods in StatefulSet
##
extraVolumeClaimTemplates: []
## @param aggregator.initScripts Dictionary of init scripts. Evaluated as a template.
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
## For example:
## initScripts:
## my_init_script.sh: |
## #!/bin/sh
## echo "Do something."
##
initScripts: {}
## @param aggregator.initScriptsCM ConfigMap with the init scripts. Evaluated as a template.
## Note: This will override initScripts
##
initScriptsCM: ""
## @param aggregator.initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template.
##
initScriptsSecret: ""
## Prometheus Exporter / Metrics
##
metrics:
## @param metrics.enabled Enable the export of Prometheus metrics
##
enabled: false
## Prometheus Exporter service parameters
##
service:
## @param metrics.service.type Prometheus metrics service type
##
type: ClusterIP
## @param metrics.service.port Prometheus metrics service port
##
port: 24231
## @param metrics.service.loadBalancerIP Load Balancer IP if the Prometheus metrics server type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
##
loadBalancerIP: ""
## @param metrics.service.clusterIP Prometheus metrics service Cluster IP
## e.g.:
## clusterIP: None
##
clusterIP: ""
## @param metrics.service.loadBalancerSourceRanges Prometheus metrics service Load Balancer sources
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param metrics.service.externalTrafficPolicy Prometheus metrics service external traffic policy
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param metrics.service.annotations [object] Annotations for the Prometheus Exporter service
## If a port or path annotation is provided, the values will be used in the Prometheus ServiceMonitor CRD.
##
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "24231"
prometheus.io/path: "/metrics"
## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
## Prometheus Operator ServiceMonitor configuration
##
serviceMonitor:
## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
##
enabled: false
## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
##