mon: Go into ERR state if multiple PGs are stuck inactive
If >=X PGs are stuck inactive longer than 'mon_pg_stuck_threshold'
we go into ERR state.

This is useful for situations where one or more PGs stay stuck in
peering or undersized state due to an OSD failure.

RBD volumes can become fully unresponsive if one or more PGs are inactive.

Fixes: #13923
wido committed Jan 16, 2016
1 parent 3daf908 commit 9fa3419
Showing 2 changed files with 10 additions and 0 deletions.
1 change: 1 addition & 0 deletions src/common/config_opts.h
@@ -236,6 +236,7 @@ OPTION(mon_clock_drift_warn_backoff, OPT_FLOAT, 5) // exponential backoff for clock drift
OPTION(mon_timecheck_interval, OPT_FLOAT, 300.0) // on leader, timecheck (clock drift check) interval (seconds)
OPTION(mon_pg_create_interval, OPT_FLOAT, 30.0) // no more than every 30s
OPTION(mon_pg_stuck_threshold, OPT_INT, 300) // number of seconds after which pgs can be considered inactive, unclean, or stale (see doc/control.rst under dump_stuck for more info)
OPTION(mon_pg_inactive_num, OPT_U64, 0) // the number of PGs which have to be inactive longer than 'mon_pg_stuck_threshold' before health goes into ERR. 0 means disabled, never go into ERR.
OPTION(mon_pg_warn_min_per_osd, OPT_INT, 30) // min # pgs per (in) osd before we warn the admin
OPTION(mon_pg_warn_max_per_osd, OPT_INT, 300) // max # pgs per (in) osd before we warn the admin
OPTION(mon_pg_warn_max_object_skew, OPT_FLOAT, 10.0) // max skew few average in objects per pg
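
For reference, a minimal ceph.conf sketch of how an operator might enable the new behaviour. Only the option names come from the change above; the values and the [mon] section placement are illustrative, not part of the commit:

    [mon]
        # escalate to HEALTH_ERR as soon as a single PG has been stuck
        # inactive for longer than mon_pg_stuck_threshold (default 0 = disabled)
        mon pg inactive num = 1
        # seconds before a PG counts as stuck (300 is the default above)
        mon pg stuck threshold = 300
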
9 changes: 9 additions & 0 deletions src/mon/PGMonitor.cc
@@ -2071,10 +2071,12 @@ void PGMonitor::get_health(list<pair<health_status_t,string> >& summary,
ceph::unordered_map<pg_t, pg_stat_t> stuck_pgs;
utime_t now(ceph_clock_now(g_ceph_context));
utime_t cutoff = now - utime_t(g_conf->mon_pg_stuck_threshold, 0);
uint64_t num_inactive_pgs = 0;

pg_map.get_stuck_stats(PGMap::STUCK_INACTIVE, cutoff, stuck_pgs);
if (!stuck_pgs.empty()) {
note["stuck inactive"] = stuck_pgs.size();
num_inactive_pgs += stuck_pgs.size();
if (detail)
note_stuck_detail(PGMap::STUCK_INACTIVE, stuck_pgs, detail);
}
@@ -2107,10 +2109,17 @@ void PGMonitor::get_health(list<pair<health_status_t,string> >& summary,
pg_map.get_stuck_stats(PGMap::STUCK_STALE, cutoff, stuck_pgs);
if (!stuck_pgs.empty()) {
note["stuck stale"] = stuck_pgs.size();
num_inactive_pgs += stuck_pgs.size();
if (detail)
note_stuck_detail(PGMap::STUCK_STALE, stuck_pgs, detail);
}

if (g_conf->mon_pg_inactive_num > 0 && num_inactive_pgs >= g_conf->mon_pg_inactive_num) {
ostringstream ss;
ss << num_inactive_pgs << " pgs are stuck inactive for more than " << g_conf->mon_pg_stuck_threshold << " seconds";
summary.push_back(make_pair(HEALTH_ERR, ss.str()));
}

if (!note.empty()) {
for (map<string,int>::iterator p = note.begin(); p != note.end(); ++p) {
ostringstream ss;
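
To make the added check easier to follow outside the monitor code, here is a small self-contained C++ sketch of the same logic. MonConfig, check_inactive_pgs and the stand-in health_status_t enum are illustrative substitutes for g_conf and the real monitor types, not part of the commit:

    // Standalone sketch of the threshold check added in PGMonitor::get_health().
    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    enum health_status_t { HEALTH_OK, HEALTH_WARN, HEALTH_ERR };

    // Hypothetical stand-in for the two relevant config options.
    struct MonConfig {
      int mon_pg_stuck_threshold = 300;  // seconds before a PG counts as stuck
      uint64_t mon_pg_inactive_num = 0;  // 0 disables the ERR escalation
    };

    // Append a HEALTH_ERR entry when enough PGs are stuck inactive,
    // mirroring the check added above.
    void check_inactive_pgs(const MonConfig& conf, uint64_t num_inactive_pgs,
                            std::vector<std::pair<health_status_t, std::string>>& summary) {
      if (conf.mon_pg_inactive_num > 0 &&
          num_inactive_pgs >= conf.mon_pg_inactive_num) {
        std::ostringstream ss;
        ss << num_inactive_pgs << " pgs are stuck inactive for more than "
           << conf.mon_pg_stuck_threshold << " seconds";
        summary.emplace_back(HEALTH_ERR, ss.str());
      }
    }

    int main() {
      MonConfig conf;
      conf.mon_pg_inactive_num = 1;  // escalate as soon as one PG is stuck inactive

      std::vector<std::pair<health_status_t, std::string>> summary;
      check_inactive_pgs(conf, 3, summary);  // pretend 3 PGs are stuck inactive
      for (const auto& item : summary)
        std::cout << "HEALTH_ERR: " << item.second << "\n";
      return 0;
    }

Note that the count feeding the real check also includes PGs stuck stale, since both get_stuck_stats() results are added to num_inactive_pgs above.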
