---
# Helm values for the rook-ceph-cluster chart.
# Defines one RBD block pool, one CephFS filesystem, and one RGW object store,
# each with a matching StorageClass, plus cluster-wide settings.

# -- RBD block pools and their StorageClass --------------------------------
cephBlockPools:
  - name: ceph-blockpool
    spec:
      enableRBDStats: true
      failureDomain: host
      replicated:
        size: 2
    storageClass:
      allowVolumeExpansion: true
      allowedTopologies: []
      enabled: true
      isDefault: true
      mountOptions: []
      name: ceph-block
      parameters:
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: '{{ .Release.Namespace }}'
        csi.storage.k8s.io/fstype: ext4
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: '{{ .Release.Namespace }}'
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: '{{ .Release.Namespace }}'
        imageFeatures: layering
        # Quoted: RBD image format must be the string "2", not the int 2.
        imageFormat: "2"
      reclaimPolicy: Delete
      volumeBindingMode: Immediate

cephBlockPoolsVolumeSnapshotClass:
  annotations: {}
  deletionPolicy: Delete
  enabled: true
  isDefault: false
  labels: {}
  name: ceph-block
  parameters: {}

# -- CephCluster CR spec ---------------------------------------------------
cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v18.2.2
  cleanupPolicy:
    allowUninstallWithVolumes: false
    # Empty string means cleanup is NOT confirmed; disks are left untouched
    # on uninstall until this is set to "yes-really-destroy-data".
    confirmation: ""
    sanitizeDisks:
      dataSource: zero
      iteration: 1
      method: quick
  dashboard:
    enabled: true
    port: 8080
    ssl: false
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mgr:
        disabled: false
      mon:
        disabled: false
      osd:
        disabled: false
  labels:
    mgr:
      release: prometheus
    monitoring:
      release: prometheus
  logCollector:
    enabled: true
    maxLogSize: 500M
    periodicity: daily
  mgr:
    allowMultiplePerNode: false
    count: 2
    modules:
      - enabled: true
        name: rook
      - enabled: true
        name: pg_autoscaler
      - enabled: true
        name: prometheus
  mon:
    allowMultiplePerNode: false
    # NOTE(review): a single mon gives no quorum redundancy — presumably a
    # small/home cluster; confirm this is intentional for production use.
    count: 1
  network:
    connections:
      compression:
        enabled: false
      encryption:
        enabled: false
      requireMsgr2: true
  priorityClassNames:
    mgr: system-cluster-critical
    mon: system-node-critical
    osd: system-node-critical
  removeOSDsIfOutAndSafeToRemove: false
  resources:
    cleanup:
      limits:
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    crashcollector:
      limits:
        memory: 60Mi
      requests:
        cpu: 100m
        memory: 60Mi
    exporter:
      limits:
        memory: 128Mi
      requests:
        cpu: 50m
        memory: 50Mi
    logcollector:
      limits:
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    mgr:
      limits:
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 512Mi
    mgr-sidecar:
      limits:
        memory: 100Mi
      requests:
        cpu: 100m
        memory: 40Mi
    mon:
      limits:
        memory: 2Gi
      requests:
        cpu: 100m
        memory: 512Mi
    osd:
      limits:
        memory: 4Gi
      requests:
        cpu: 100m
        memory: 512Mi
    prepareosd:
      requests:
        cpu: 100m
        memory: 50Mi
  storage:
    # Explicit LVM logical volumes on every node; useAllNodes applies the
    # same device list cluster-wide.
    devices:
      - name: /dev/vg0/ceph-0
      - name: /dev/vg0/ceph-1
    useAllDevices: false
    useAllNodes: true

cephFileSystemVolumeSnapshotClass:
  annotations: {}
  deletionPolicy: Delete
  enabled: true
  isDefault: true
  labels: {}
  name: ceph-filesystem
  parameters: {}

# -- CephFS filesystems and their StorageClass -----------------------------
cephFileSystems:
  - name: ceph-filesystem
    spec:
      dataPools:
        - failureDomain: host
          name: data0
          replicated:
            size: 2
      metadataPool:
        replicated:
          size: 2
      metadataServer:
        activeCount: 1
        activeStandby: true
        priorityClassName: system-cluster-critical
        resources:
          limits:
            cpu: 2000m
            memory: 4Gi
          requests:
            cpu: 100m
            memory: 1Gi
    storageClass:
      allowVolumeExpansion: true
      enabled: true
      isDefault: false
      mountOptions: []
      name: ceph-filesystem
      parameters:
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: '{{ .Release.Namespace }}'
        csi.storage.k8s.io/fstype: ext4
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: '{{ .Release.Namespace }}'
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: '{{ .Release.Namespace }}'
        # Must match the dataPools entry name above.
        pool: data0
      reclaimPolicy: Delete
      volumeBindingMode: Immediate

# -- RGW object stores and their bucket StorageClass -----------------------
cephObjectStores:
  - ingress:
      enabled: false
    name: ceph-objectstore
    spec:
      dataPool:
        failureDomain: host
        replicated:
          size: 2
      gateway:
        instances: 1
        port: 80
        priorityClassName: system-cluster-critical
        resources:
          limits:
            cpu: 2000m
            memory: 2Gi
          requests:
            cpu: 100m
            memory: 1Gi
      metadataPool:
        failureDomain: host
        replicated:
          size: 2
      preservePoolsOnDelete: false
    storageClass:
      enabled: true
      name: ceph-bucket
      parameters:
        region: us-east-1
      reclaimPolicy: Delete
      volumeBindingMode: Immediate

clusterName: ceph

# Injected into the cluster's ceph.conf.
# NOTE(review): osd_pool_default_size/min_size = 1 conflict with the
# replicated size: 2 pools declared above — presumably a leftover from a
# single-node bootstrap; confirm this override is still wanted.
configOverride: |
  [global]
  mon_allow_pool_delete = true
  osd_pool_default_size = 1
  osd_pool_default_min_size = 1

# -- Dashboard ingress -----------------------------------------------------
ingress:
  dashboard:
    annotations:
      cert-manager.io/cluster-issuer: idm
      external-dns.alpha.kubernetes.io/hostname: ceph.example.com
      external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only
      external-dns.alpha.kubernetes.io/target: private.int.example.com
      external-dns.alpha.kubernetes.io/ttl: "60"
      external-dns.alpha.kubernetes.io/zone: example.com
      # Legacy ingress-class annotation; class selection here relies on this
      # because ingressClassName below is disabled.
      kubernetes.io/ingress.class: private
    host:
      name: ceph.example.com
      path: /
    # NOTE(review): the chart expects a string IngressClass name here; the
    # boolean false presumably falls out of an `if` guard in the template so
    # no spec.ingressClassName is rendered — confirm, or set a class name.
    ingressClassName: false
    tls:
      - hosts:
          - ceph.example.com
        secretName: ceph.example.com.tls

monitoring:
  createPrometheusRules: true
  enabled: true
  prometheusRule:
    labels:
      release: prometheus
  rulesNamespaceOverride: monitoring

# Namespace where the rook-ceph operator runs.
operatorNamespace: storage

toolbox:
  enabled: true
  # Keep in lockstep with cephClusterSpec.cephVersion.image.
  image: quay.io/ceph/ceph:v18.2.2