# 40_alertrules.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
labels:
app.kubernetes.io/component: rook-ceph
app.kubernetes.io/managed-by: commodore
app.kubernetes.io/name: syn-prometheus-ceph-rules
prometheus: rook-prometheus
role: alert-rules
name: syn-prometheus-ceph-rules
namespace: syn-rook-ceph-cluster
spec:
groups:
- name: cluster health
rules:
- alert: SYN_CephHealthError
annotations:
description: The cluster state has been HEALTH_ERROR for more than 5 minutes.
Please check 'ceph health detail' for more information.
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephHealthError.html
summary: Ceph is in the ERROR state
expr: ceph_health_status == 2
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.2.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephHealthWarning
annotations:
description: The cluster state has been HEALTH_WARN for more than 15 minutes.
Please check 'ceph health detail' for more information.
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephHealthWarning.html
summary: Ceph is in the WARNING state
expr: ceph_health_status == 1
for: 15m
labels:
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
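# Both cluster-health alerts refer to 'ceph health detail'. A minimal sketch of running
# it via the Rook toolbox; the toolbox deployment name is an assumption, the namespace
# is taken from this manifest:
#   kubectl -n syn-rook-ceph-cluster exec deploy/rook-ceph-tools -- ceph health detail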
- name: mon
rules:
- alert: SYN_CephMonDownQuorumAtRisk
annotations:
description: '{{ $min := query "floor(count(ceph_mon_metadata) / 2) +
1" | first | value }}Quorum requires a majority of monitors (x {{ $min
}}) to be active. Without quorum the cluster will become inoperable,
affecting all services and connected clients. The following monitors
are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon)
group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon
}} on {{ .Labels.hostname }} {{- end }}'
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephMonDownQuorumAtRisk.html
summary: Monitor quorum is at risk
expr: |
(
(ceph_health_detail{name="MON_DOWN"} == 1) * on() (
count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1)
)
) == 1
for: 30s
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.3.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephMonDown
annotations:
description: |
{{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephMonDown.html
summary: One or more monitors down
expr: |
count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1)
for: 30s
labels:
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
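# For the monitor alerts above, 'ceph mon stat' and 'ceph quorum_status' (standard Ceph
# CLI, run from a node or toolbox pod with an admin keyring) show which monitors are in
# or out of quorum.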
- name: osd
rules:
- alert: SYN_CephOSDDown
annotations:
description: |
{{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephOSDDown.html
summary: An OSD has been marked down
expr: ceph_health_detail{name="OSD_DOWN"} == 1
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.4.2
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephOSDFull
annotations:
description: An OSD has reached the FULL threshold. Writes to pools that
share the affected OSD will be blocked. Use 'ceph health detail' and
'ceph osd df' to identify the problem. To resolve, add capacity to the
affected OSD's failure domain, restore down/out OSDs, or delete unwanted
data.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephOSDFull.html
summary: OSD full, writes blocked
expr: ceph_health_detail{name="OSD_FULL"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.4.6
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephOSDFlapping
annotations:
description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was
marked down and back up {{ $value | humanize }} times per minute over
the last 5 minutes. This may indicate a network issue (latency, packet loss,
MTU mismatch) on the cluster network, or the public network if no cluster
network is deployed. Check the network stats on the listed host(s).
documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephOSDFlapping.html
summary: Network issues are causing OSDs to flap (mark each other down)
expr: (rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata)
* 60 > 1
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.4.4
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
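# The OSD descriptions resolve hostnames by joining on ceph_osd_metadata; the same query
# can be run ad hoc in Prometheus to list down OSDs with their hosts:
#   (ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0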
- name: mds
rules:
- alert: SYN_CephFilesystemDamaged
annotations:
description: Filesystem metadata has been corrupted. Data may be inaccessible.
Analyze metrics from the MDS daemon admin socket, or escalate to support.
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephFilesystemDamaged.html
summary: CephFS filesystem is damaged.
expr: ceph_health_detail{name="MDS_DAMAGE"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.5.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephFilesystemOffline
annotations:
description: All MDS ranks are unavailable. The MDS daemons managing metadata
are down, rendering the filesystem offline.
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephFilesystemOffline.html
summary: CephFS filesystem is offline
expr: ceph_health_detail{name="MDS_ALL_DOWN"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.5.3
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephFilesystemDegraded
annotations:
description: One or more metadata daemons (MDS ranks) are failed or in
a damaged state. At best the filesystem is partially available, at worst
the filesystem is completely unusable.
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephFilesystemDegraded.html
summary: CephFS filesystem is degraded
expr: ceph_health_detail{name="FS_DEGRADED"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.5.4
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephFilesystemFailureNoStandby
annotations:
description: An MDS daemon has failed, leaving only one active rank and
no available standby. Investigate the cause of the failure or add a
standby MDS.
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephFilesystemFailureNoStandby.html
summary: MDS daemon failed, no further standby available
expr: ceph_health_detail{name="FS_WITH_FAILED_MDS"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.5.5
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephFilesystemReadOnly
annotations:
description: The filesystem has switched to READ ONLY due to an unexpected
error when writing to the metadata pool. Either analyze the output from
the MDS daemon admin socket, or escalate to support.
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephFilesystemReadOnly.html
summary: CephFS filesystem in read only mode due to write error(s)
expr: ceph_health_detail{name="MDS_HEALTH_READ_ONLY"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.5.2
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
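# For the CephFS alerts above, 'ceph fs status' and 'ceph fs dump' (standard Ceph CLI)
# show per-rank MDS state and standby availability; suggested here as a starting point,
# not part of the rule definitions.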
- name: mgr
rules:
- alert: SYN_CephMgrModuleCrash
annotations:
description: One or more mgr modules have crashed and have yet to be acknowledged
by an administrator. A crashed module may impact functionality within
the cluster. Use the 'ceph crash' command to determine which module
has failed, and archive it to acknowledge the failure.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephMgrModuleCrash.html
summary: A manager module has recently crashed
expr: ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"} == 1
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.6.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephMgrPrometheusModuleInactive
annotations:
description: The mgr/prometheus module at {{ $labels.instance }} is unreachable.
This could mean that the module has been disabled or the mgr daemon
itself is down. Without the mgr/prometheus module, metrics and alerts
will no longer function. Open a shell to an admin node or toolbox pod
and use 'ceph -s' to determine whether the mgr is active. If the
mgr is not active, restart it, otherwise you can determine module status
with 'ceph mgr module ls'. If it is not listed as enabled, enable it
with 'ceph mgr module enable prometheus'.
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephMgrPrometheusModuleInactive.html
summary: The mgr/prometheus module is not available
expr: up{job="ceph"} == 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.6.2
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
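# Check sequence suggested by the SYN_CephMgrPrometheusModuleInactive description
# (standard Ceph CLI):
#   ceph -s                              # is an mgr active?
#   ceph mgr module ls                   # is the prometheus module enabled?
#   ceph mgr module enable prometheus    # enable it if not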
- name: pgs
rules:
- alert: SYN_CephPGsInactive
annotations:
description: '{{ $value }} PGs have been inactive for more than 5 minutes
in pool {{ $labels.name }}. Inactive placement groups are not able to
serve read/write requests.'
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPGsInactive.html
summary: One or more placement groups are inactive
expr: ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total
- ceph_pg_active) > 0
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.7.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephPGsDamaged
annotations:
description: During data consistency checks (scrub), at least one PG has
been flagged as being damaged or inconsistent. Check to see which PG
is affected, and attempt a manual repair if necessary. To list problematic
placement groups, use 'rados list-inconsistent-pg <pool>'. To repair
PGs use the 'ceph pg repair <pg_num>' command.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPGsDamaged.html
summary: Placement group damaged, manual intervention needed
expr: ceph_health_detail{name=~"PG_DAMAGED|OSD_SCRUB_ERRORS"} == 1
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.7.4
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephPGRecoveryAtRisk
annotations:
description: Data redundancy is at risk since one or more OSDs are at
or above the 'full' threshold. Add more capacity to the cluster, restore
down/out OSDs, or delete unwanted data.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPGRecoveryAtRisk.html
summary: OSDs are too full for recovery
expr: ceph_health_detail{name="PG_RECOVERY_FULL"} == 1
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.7.5
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephPGUnavailableBlockingIO
annotations:
description: Data availability is reduced, impacting the cluster's ability
to service I/O. One or more placement groups (PGs) are in a state that
blocks I/O.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPGUnavailableBlockingIO.html
summary: PG is unavailable, blocking I/O
expr: ((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"}))
== 1
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.7.3
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephPGBackfillAtRisk
annotations:
description: Data redundancy may be at risk due to lack of free space
within the cluster. One or more OSDs have reached the 'backfillfull'
threshold. Add more capacity, or delete unwanted data.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPGBackfillAtRisk.html
summary: Backfill operations are blocked due to lack of free space
expr: ceph_health_detail{name="PG_BACKFILL_FULL"} == 1
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.7.6
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
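# Repair workflow referenced by SYN_CephPGsDamaged (placeholders as in the description):
#   rados list-inconsistent-pg <pool>    # list PGs with scrub inconsistencies
#   ceph pg repair <pg_num>              # ask Ceph to repair a single PG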
- name: nodes
rules:
- alert: SYN_CephNodeRootFilesystemFull
annotations:
description: 'Root volume is dangerously full: {{ $value | humanize }}%
free.'
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephNodeRootFilesystemFull.html
summary: Root filesystem is dangerously full
expr: node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"}
* 100 < 5
for: 5m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.8.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- alert: SYN_CephNodeNetworkBondDegraded
annotations:
description: Bond {{ $labels.master }} is degraded on Node {{ $labels.instance
}}.
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephNodeNetworkBondDegraded.html
summary: Degraded Bond on Node {{ $labels.instance }}
expr: |
node_bonding_slaves - node_bonding_active != 0
labels:
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
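# For SYN_CephNodeNetworkBondDegraded, the per-slave link state of a bond can be read on
# the affected node from /proc/net/bonding/<bond name> (standard Linux bonding interface,
# not Ceph-specific).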
- name: pools
rules:
- alert: SYN_CephPoolFull
annotations:
description: A pool has reached its MAX quota, or OSDs supporting the
pool have reached the FULL threshold. Until this is resolved, writes
to the pool will be blocked. Pool Breakdown (top 5) {{- range query
"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right
ceph_pool_metadata))" }} - {{ .Labels.name }} at {{ .Value }}% {{- end
}} Increase the pool's quota, or add capacity to the cluster first then
increase the pool's quota (e.g. ceph osd pool set-quota <pool_name>
max_bytes <bytes>)
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephPoolFull.html
summary: Pool is full - writes are blocked
expr: ceph_health_detail{name="POOL_FULL"} > 0
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.9.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
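# Capacity and quota commands for SYN_CephPoolFull (standard Ceph CLI; placeholders as in
# the description):
#   ceph df detail                                          # per-pool usage and quotas
#   ceph osd pool set-quota <pool_name> max_bytes <bytes>   # raise the pool quota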
- name: healthchecks
rules:
- alert: SYN_CephDaemonSlowOps
annotations:
description: '{{ $labels.ceph_daemon }} operations are taking too long
to process (complaint time exceeded)'
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephDaemonSlowOps.html
summary: '{{ $labels.ceph_daemon }} operations are slow to complete'
expr: ceph_daemon_health_metrics{type="SLOW_OPS"} > 0
for: 30s
labels:
severity: warning
syn: 'true'
syn_component: rook-ceph
type: ceph_default
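# For SYN_CephDaemonSlowOps, the daemon's admin socket lists the slow requests, e.g.
# 'ceph daemon <daemon> dump_ops_in_flight' run on the host (or pod) where the daemon
# identified by the alert labels is running.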
- name: rados
rules:
- alert: SYN_CephObjectMissing
annotations:
description: The latest version of a RADOS object cannot be found, even
though all OSDs are up. I/O requests for this object from clients will
block (hang). Resolving this issue may require the object to be rolled
back to a prior version manually and then verified.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephObjectMissing.html
summary: Object(s) marked UNFOUND
expr: (ceph_health_detail{name="OBJECT_UNFOUND"} == 1) * on() (count(ceph_osd_up
== 1) == bool count(ceph_osd_metadata)) == 1
for: 30s
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.10.1
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
- name: generic
rules:
- alert: SYN_CephDaemonCrash
annotations:
description: One or more daemons have crashed recently, and need to be
acknowledged. This notification ensures that software crashes do not
go unseen. To acknowledge a crash, use the 'ceph crash archive <id>'
command.
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/CephDaemonCrash.html
summary: One or more Ceph daemons have crashed, and are pending acknowledgement
expr: ceph_health_detail{name="RECENT_CRASH"} == 1
for: 1m
labels:
oid: 1.3.6.1.4.1.50495.1.2.1.1.2
severity: critical
syn: 'true'
syn_component: rook-ceph
type: ceph_default
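# Acknowledging crashes for SYN_CephDaemonCrash (standard Ceph crash-module commands):
#   ceph crash ls            # list recent crashes
#   ceph crash archive <id>  # acknowledge a single crash
#   ceph crash archive-all   # acknowledge all outstanding crashes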
- name: syn-rook-ceph-additional.rules
rules:
- alert: RookCephOperatorScaledDown
annotations:
runbook_url: https://hub.syn.tools/rook-ceph/runbooks/RookCephOperatorScaledDown.html
summary: rook-ceph operator scaled to 0 for more than 1 hour.
expr: kube_deployment_spec_replicas{deployment="rook-ceph-operator", namespace="syn-rook-ceph-operator"}
== 0
for: 1h
labels:
severity: warning
syn: 'true'
syn_component: rook-ceph
- expr: sum(ceph_mon_num_sessions{})
record: ceph_mon_num_sessions:sum
- expr: count(ceph_mon_quorum_status{})
record: ceph_mon_quorum_status:count
- expr: avg(ceph_osd_apply_latency_ms{})
record: ceph_osd_apply_latency_ms:avg
- expr: avg(ceph_osd_commit_latency_ms{})
record: ceph_osd_commit_latency_ms:avg
- expr: sum(ceph_osd_numpg{})
record: ceph_osd_numpg:sum
- expr: sum(rate(ceph_osd_op_r{}[5m]))
record: ceph_osd_op_r:rate5m
- expr: avg(rate(ceph_osd_op_r_latency_sum{}[5m]) / rate(ceph_osd_op_r_latency_count{}[5m])
>= 0)
record: ceph_osd_op_r_latency:avg5m
- expr: sum(rate(ceph_osd_op_r_out_bytes{}[5m]))
record: ceph_osd_op_r_out_bytes:rate5m
- expr: sum(ceph_osd_op_r_out_bytes{})
record: ceph_osd_op_r_out_bytes:sum
- expr: sum(rate(ceph_osd_op_w{}[5m]))
record: ceph_osd_op_w:rate5m
- expr: sum(rate(ceph_osd_op_w_in_bytes{}[5m]))
record: ceph_osd_op_w_in_bytes:rate5m
- expr: sum(ceph_osd_op_w_in_bytes{})
record: ceph_osd_op_w_in_bytes:sum
- expr: avg(rate(ceph_osd_op_w_latency_sum{}[5m]) / rate(ceph_osd_op_w_latency_count{}[5m])
>= 0)
record: ceph_osd_op_w_latency:avg5m
- expr: sum(ceph_pool_objects{})
record: ceph_pool_objects:sum
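# The recording rules above precompute cluster-wide aggregates (monitor sessions, PG
# counts, OSD op rates and latencies) so that dashboards can query the recorded series,
# e.g. ceph_osd_op_r_latency:avg5m, instead of re-evaluating the raw expressions.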