# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Namespace of the main rook operator
operatorNamespace: rook-ceph
# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:
# -- Optional override of the target kubernetes version
kubeVersion:
# -- Cluster ceph.conf override
configOverride:
# configOverride: |
# [global]
# mon_allow_pool_delete = true
# osd_pool_default_size = 3
# osd_pool_default_min_size = 2
# Installs a debugging toolbox deployment
toolbox:
# -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
enabled: false
# -- Toolbox image, defaults to the image used by the Ceph cluster
image: #quay.io/ceph/ceph:v19.2.0
# -- Toolbox tolerations
tolerations: []
# -- Toolbox affinity
affinity: {}
# -- Toolbox container security context
containerSecurityContext:
runAsNonRoot: true
runAsUser: 2016
runAsGroup: 2016
capabilities:
drop: ["ALL"]
# -- Toolbox resources
resources:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "128Mi"
# -- Set the priority class for the toolbox if desired
priorityClassName:
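# As a hedged sketch, the toolbox could be enabled and allowed onto tainted storage nodes like
# this (all keys are from this chart; the "storage-node" taint key matches the placement example
# further below and is only an illustrative assumption):
# toolbox:
#   enabled: true
#   tolerations:
#     - key: storage-node   # hypothetical taint key; adjust to your nodes
#       operator: Exists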
monitoring:
# -- Enable Prometheus integration. This will also create the necessary RBAC rules to allow the operator to create ServiceMonitors.
# Monitoring requires Prometheus to be pre-installed.
enabled: false
# -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
metricsDisabled: false
# -- Whether to create the Prometheus rules for Ceph alerts
createPrometheusRules: false
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
# If you have multiple rook-ceph clusters in the same k8s cluster, set the same rulesNamespaceOverride (ideally, the namespace where
# prometheus is deployed) for all of the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
rulesNamespaceOverride:
# Monitoring settings for external clusters:
# externalMgrEndpoints: <list of endpoints>
# externalMgrPrometheusPort: <port>
# Scrape interval for prometheus
# interval: 10s
# allow adding custom labels and annotations to the prometheus rule
prometheusRule:
# -- Labels applied to PrometheusRule
labels: {}
# -- Annotations applied to PrometheusRule
annotations: {}
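# A minimal sketch of labeling the rule so a Prometheus Operator instance picks it up
# (the "release" label and its value are assumptions; match them to your Prometheus installation):
# prometheusRule:
#   labels:
#     release: kube-prometheus-stack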
# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false
# The imagePullSecrets option allows pulling container images from a private registry. It will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret
# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
# This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
# as in the host-based example (cluster.yaml). For a different configuration such as a
# PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
# or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
# with the specs from those examples.
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
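# As a sketch, a small values override can also change just part of this spec, since Helm merges
# maps with the chart defaults; for example, switching to host networking (the `provider: host`
# setting is shown in the network section further below):
# cephClusterSpec:
#   network:
#     provider: host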
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v18 is Reef, v19 is Squid
# RECOMMENDATION: In production, use a specific version tag instead of a general tag such as v19, which pulls the latest release and could result in different
# versions running within the cluster. See the tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.0-20240927.
# Such a tag might not contain a new Ceph version, just security fixes from the underlying operating system, which reduces vulnerabilities.
image: quay.io/ceph/ceph:v19.2.0
# Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
# Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not the upgrade should continue even if a check fails
# This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise
# Use at your OWN risk
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
skipUpgradeChecks: false
# Whether or not continue if PGs are not clean during an upgrade
continueUpgradeAfterChecksEvenIfNotHealthy: false
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator will wait before an OSD can be stopped for upgrade or restart.
# If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
# continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
# The default wait timeout is 10 minutes.
waitTimeoutForHealthyOSDInMinutes: 10
# Whether or not to require that PGs are clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
# This configuration will be ignored if `skipUpgradeChecks` is `true`.
# Default is false.
upgradeOSDRequiresHealthyPGs: false
mon:
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: 3
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: 2
allowMultiplePerNode: false
modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
# - name: rook
# enabled: true
# enable the ceph dashboard for viewing cluster status
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
# urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# Serve the dashboard using SSL (if you use an ingress to expose the dashboard with `ssl: true`, you need to set
# the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
ssl: true
# Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
network:
connections:
# Whether to encrypt the data in transit across the wire, to prevent eavesdropping on the network.
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons, will be encrypted.
# When encryption is not enabled, clients still establish strong initial authentication and data integrity is still validated with a CRC check.
# IMPORTANT: Encryption requires kernel 5.11 or newer for the latest nbd and cephfs drivers. Alternatively, for testing only,
# you can set "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
# The kernel requirements above for encryption also apply to compression.
compression:
enabled: false
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
# and clients will be required to connect to the Ceph cluster with the v2 port (3300).
# Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
requireMsgr2: false
# # enable host networking
# provider: host
# # EXPERIMENTAL: enable the Multus network provider
# provider: multus
# selectors:
# # The selector keys are required to be `public` and `cluster`.
# # Based on the configuration, the operator will do the following:
# # 1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
# # 2. if both `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to 'cluster_network'
# #
# # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
# #
# # public: public-conf --> NetworkAttachmentDefinition object name in Multus
# # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
# # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
# ipFamily: "IPv6"
# # Ceph daemons to listen on both IPv4 and IPv6 networks
# dualStack: false
# enable the crash collector for ceph daemon crash collection
crashCollector:
disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
# daysToRetain: 30
# enable the log collector; daemons will log to files and rotate them
logCollector:
enabled: true
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
# This value should only be set when the cluster is about to be deleted. After the confirmation is set,
# Rook will immediately stop configuring the cluster and only wait for the delete command.
# If the empty string is set, Rook will not destroy any data on hosts during uninstall.
confirmation: ""
# sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
sanitizeDisks:
# method indicates whether the entire disk should be sanitized or simply ceph's metadata
# in both cases, re-installation is possible
# possible choices are 'complete' or 'quick' (default)
method: quick
# dataSource indicates where to get the bytes that are written to the disk
# possible choices are 'zero' (default) or 'random'
# using the random source will consume entropy from the system and will take much more time than the zero source
dataSource: zero
# iteration overwrites the disk N times instead of the default (1)
# takes an integer value
iteration: 1
# allowUninstallWithVolumes defines how the uninstall should be performed
# If set to true, cephCluster deletion does not wait for the PVs to be deleted.
allowUninstallWithVolumes: false
# To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
# The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
# tolerate taints with a key of 'storage-node'.
# placement:
# all:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: role
# operator: In
# values:
# - storage-node
# podAffinity:
# podAntiAffinity:
# topologySpreadConstraints:
# tolerations:
# - key: storage-node
# operator: Exists
# # The above placement information can also be specified for mon, osd, and mgr components
# mon:
# # Monitor deployments may contain an anti-affinity rule for avoiding monitor
# # collocation on the same node. This is a required rule when host network is used
# # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
# # preferred rule with weight: 50.
# osd:
# mgr:
# cleanup:
# annotations:
# all:
# mon:
# osd:
# cleanup:
# prepareosd:
# # If no mgr annotations are set, prometheus scrape annotations will be set by default.
# mgr:
# dashboard:
# labels:
# all:
# mon:
# osd:
# cleanup:
# mgr:
# prepareosd:
# # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
# # These labels can be passed as LabelSelector to Prometheus
# monitoring:
# dashboard:
resources:
mgr:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "512Mi"
mon:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
osd:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
prepareosd:
# limits: It is not recommended to set limits on the OSD prepare job
# since it's a one-time burst for memory that must be allowed to
# complete without an OOM kill. Note however that if a k8s
# limitRange guardrail is defined external to Rook, the lack of
# a limit here may result in a sync failure, in which case a
# limit should be added. 1200Mi may suffice for up to 15Ti
# OSDs; for larger devices, 2Gi may be required.
# cf. https://github.com/rook/rook/pull/11103
requests:
cpu: "500m"
memory: "50Mi"
mgr-sidecar:
limits:
memory: "100Mi"
requests:
cpu: "100m"
memory: "40Mi"
crashcollector:
limits:
memory: "60Mi"
requests:
cpu: "100m"
memory: "60Mi"
logcollector:
limits:
memory: "1Gi"
requests:
cpu: "100m"
memory: "100Mi"
cleanup:
limits:
memory: "1Gi"
requests:
cpu: "500m"
memory: "100Mi"
exporter:
limits:
memory: "128Mi"
requests:
cpu: "50m"
memory: "50Mi"
# The option to automatically remove OSDs that are out and are safe to destroy.
removeOSDsIfOutAndSafeToRemove: false
# priority classes to apply to ceph resources
priorityClassNames:
mon: system-node-critical
osd: system-node-critical
mgr: system-cluster-critical
storage: # cluster level storage configuration and selection
useAllNodes: true
useAllDevices: true
# deviceFilter:
# config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational device so ceph-volume will use it as the bluestore block.db device.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
osdMaintenanceTimeout: 30
# A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
# The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
# No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
pgHealthCheckTimeout: 0
# Configure the healthcheck and liveness probes for ceph pods.
# Valid values for daemons are 'mon', 'osd', 'status'
healthCheck:
daemonHealth:
mon:
disabled: false
interval: 45s
osd:
disabled: false
interval: 60s
status:
disabled: false
interval: 60s
# Change the pod liveness probe; it works for all mon, mgr, and osd pods.
livenessProbe:
mon:
disabled: false
mgr:
disabled: false
osd:
disabled: false
ingress:
# -- Enable an ingress for the ceph-dashboard
dashboard:
{}
# annotations:
# external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
# nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
# If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/server-snippet: |
# proxy_ssl_verify off;
# host:
# name: dashboard.example.com
# path: "/ceph-dashboard(/|$)(.*)"
# tls:
# - hosts:
# - dashboard.example.com
# secretName: testsecret-tls
## Note: Only one of the ingress class annotation or the `ingressClassName:` field can be used at a time
## to set the ingress class
# ingressClassName: nginx
# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
- name: ceph-blockpool
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
spec:
failureDomain: host
replicated:
size: 3
# Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
# For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
# enableRBDStats: true
storageClass:
enabled: true
name: ceph-block
annotations: {}
labels: {}
isDefault: true
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
mountOptions: []
# see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
allowedTopologies: []
# - matchLabelExpressions:
# - key: rook-ceph-role
# values:
# - storage-node
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
parameters:
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# mapOptions: lock_on_read,queue_depth=1024
# (optional) unmapOptions is a comma-separated list of unmap options.
# For krbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# For nbd options refer
# https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# unmapOptions: force
# RBD image format. Defaults to "2".
imageFormat: "2"
# RBD image features, equivalent to OR'd bitfield value: 63
# Available for imageFormat: "2". Older releases of CSI RBD
# support only the `layering` feature. The Linux kernel (KRBD) supports the
# full feature complement as of 5.4
imageFeatures: layering
# These secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
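# A hedged sketch of an additional pool entry that targets a specific CRUSH device class
# (the "ssd" class and the "ceph-block-ssd" names are illustrative assumptions; the storageClass
# parameters would mirror the ones above):
# - name: ceph-blockpool-ssd
#   spec:
#     failureDomain: host
#     deviceClass: ssd
#     replicated:
#       size: 3
#   storageClass:
#     enabled: true
#     name: ceph-block-ssd
#     isDefault: false
#     reclaimPolicy: Delete
#     allowVolumeExpansion: true
#     volumeBindingMode: "Immediate"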
# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
- name: ceph-filesystem
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
spec:
metadataPool:
replicated:
size: 3
dataPools:
- failureDomain: host
replicated:
size: 3
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
activeCount: 1
activeStandby: true
resources:
limits:
memory: "4Gi"
requests:
cpu: "1000m"
memory: "4Gi"
priorityClassName: system-cluster-critical
storageClass:
enabled: true
isDefault: false
name: ceph-filesystem
# (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
pool: data0
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
mountOptions: []
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
parameters:
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
# Specify the filesystem type of the volume. If not specified, csi-provisioner
# will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4
# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
enabled: false
name: ceph-filesystem
isDefault: true
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
parameters: {}
# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: ceph-block
isDefault: false
deletionPolicy: Delete
annotations: {}
labels: {}
# see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
parameters: {}
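# A minimal sketch (keys as above) of enabling the RBD snapshot class while retaining the
# underlying snapshot content when a VolumeSnapshot object is deleted:
# cephBlockPoolsVolumeSnapshotClass:
#   enabled: true
#   name: ceph-block
#   deletionPolicy: Retain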
# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
- name: ceph-objectstore
# see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
spec:
metadataPool:
failureDomain: host
replicated:
size: 3
dataPool:
failureDomain: host
erasureCoded:
dataChunks: 2
codingChunks: 1
parameters:
bulk: "true"
preservePoolsOnDelete: true
gateway:
port: 80
resources:
limits:
memory: "2Gi"
requests:
cpu: "1000m"
memory: "1Gi"
# securePort: 443
# sslCertificateRef:
instances: 1
priorityClassName: system-cluster-critical
# opsLogSidecar:
# resources:
# limits:
# memory: "100Mi"
# requests:
# cpu: "100m"
# memory: "40Mi"
storageClass:
enabled: true
name: ceph-bucket
reclaimPolicy: Delete
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
# see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
parameters:
# note: objectStoreNamespace and objectStoreName are configured by the chart
region: us-east-1
ingress:
# Enable an ingress for the ceph-objectstore
enabled: false
# annotations: {}
# host:
# name: objectstore.example.com
# path: /
# tls:
# - hosts:
# - objectstore.example.com
# secretName: ceph-objectstore-tls
# ingressClassName: nginx
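# Once the object store and the "ceph-bucket" storage class above are deployed, a bucket can be
# requested with an ObjectBucketClaim manifest (a hedged sketch; the claim name and
# generateBucketName are illustrative):
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: ceph-bucket-claim
# spec:
#   generateBucketName: ceph-bkt
#   storageClassName: ceph-bucket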
## cephECBlockPools are disabled by default; please remove the comments and set desired values to enable it
## For erasure coded pools, a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
# - name: ec-pool
# spec:
# metadataPool:
# replicated:
# size: 2
# dataPool:
# failureDomain: osd
# erasureCoded:
# dataChunks: 2
# codingChunks: 1
# deviceClass: hdd
#
# parameters:
# # clusterID is the namespace where the rook cluster is running
# # If you change this namespace, also change the namespace below where the secret namespaces are defined
# clusterID: rook-ceph # namespace:cluster
# # (optional) mapOptions is a comma-separated list of map options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # mapOptions: lock_on_read,queue_depth=1024
#
# # (optional) unmapOptions is a comma-separated list of unmap options.
# # For krbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
# # For nbd options refer
# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
# # unmapOptions: force
#
# # RBD image format. Defaults to "2".
# imageFormat: "2"
#
# # RBD image features, equivalent to OR'd bitfield value: 63
# # Available for imageFormat: "2". Older releases of CSI RBD
# # support only the `layering` feature. The Linux kernel (KRBD) supports the
# # full feature complement as of 5.4
# # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
# imageFeatures: layering
#
# storageClass:
# provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
# enabled: true
# name: rook-ceph-block
# isDefault: false
# annotations: { }
# labels: { }
# allowVolumeExpansion: true
# reclaimPolicy: Delete
# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
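# As a usage sketch (the repo URL and chart name are taken from the Rook release charts; the
# override file name is an assumption), a cluster built from this values file is typically
# installed alongside an existing rook-ceph operator with:
#   helm repo add rook-release https://charts.rook.io/release
#   helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
#     --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values-override.yaml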