Merge PR #27387 into nautilus
* refs/pull/27387/head:
	mgr/pg_autoscaler: apply bias to pg_num selection
	mgr/pg_autoscaler: include pg_autoscale_bias in autoscale-status table
	osd/osd_types,mon: add pg_autoscale_bias pool property

Reviewed-by: xie xingguo <xie.xingguo@zte.com.cn>
Reviewed-by: Neha Ojha <nojha@redhat.com>
liewegas committed Apr 10, 2019
2 parents 6bef0f7 + 791dd8c commit 571be91
Showing 5 changed files with 25 additions and 7 deletions.
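In short: pg_autoscale_bias is a per-pool multiplier (default 1.0) that the pg_autoscaler applies when choosing a pool's pg_num, so pools that need more PGs than their raw capacity share implies, such as metadata pools, can be nudged upward without disabling autoscaling.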
4 changes: 2 additions & 2 deletions src/mon/MonCommands.h
@@ -1007,11 +1007,11 @@ COMMAND("osd pool rename " \
"rename <srcpool> to <destpool>", "osd", "rw")
COMMAND("osd pool get " \
"name=pool,type=CephPoolname " \
"name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_num_min|target_size_bytes|target_size_ratio", \
"name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|erasure_code_profile|min_read_recency_for_promote|all|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_autoscale_bias|pg_num_min|target_size_bytes|target_size_ratio", \
"get pool parameter <var>", "osd", "r")
COMMAND("osd pool set " \
"name=pool,type=CephPoolname " \
"name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|pgp_num_actual|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_num_min|target_size_bytes|target_size_ratio " \
"name=var,type=CephChoices,strings=size|min_size|pg_num|pgp_num|pgp_num_actual|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|write_fadvise_dontneed|noscrub|nodeep-scrub|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|use_gmt_hitset|target_max_bytes|target_max_objects|cache_target_dirty_ratio|cache_target_dirty_high_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|min_read_recency_for_promote|min_write_recency_for_promote|fast_read|hit_set_grade_decay_rate|hit_set_search_last_n|scrub_min_interval|scrub_max_interval|deep_scrub_interval|recovery_priority|recovery_op_priority|scrub_priority|compression_mode|compression_algorithm|compression_required_ratio|compression_max_blob_size|compression_min_blob_size|csum_type|csum_min_block|csum_max_block|allow_ec_overwrites|fingerprint_algorithm|pg_autoscale_mode|pg_autoscale_bias|pg_num_min|target_size_bytes|target_size_ratio " \
"name=val,type=CephString " \
"name=yes_i_really_mean_it,type=CephBool,req=false", \
"set pool parameter <var> to <val>", "osd", "rw")
11 changes: 10 additions & 1 deletion src/mon/OSDMonitor.cc
@@ -4553,7 +4553,8 @@ namespace {
COMPRESSION_MODE, COMPRESSION_ALGORITHM, COMPRESSION_REQUIRED_RATIO,
COMPRESSION_MAX_BLOB_SIZE, COMPRESSION_MIN_BLOB_SIZE,
CSUM_TYPE, CSUM_MAX_BLOCK, CSUM_MIN_BLOCK, FINGERPRINT_ALGORITHM,
-PG_AUTOSCALE_MODE, PG_NUM_MIN, TARGET_SIZE_BYTES, TARGET_SIZE_RATIO };
+PG_AUTOSCALE_MODE, PG_NUM_MIN, TARGET_SIZE_BYTES, TARGET_SIZE_RATIO,
+PG_AUTOSCALE_BIAS };

std::set<osd_pool_get_choices>
subtract_second_from_first(const std::set<osd_pool_get_choices>& first,
@@ -5253,6 +5254,7 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
{"pg_num_min", PG_NUM_MIN},
{"target_size_bytes", TARGET_SIZE_BYTES},
{"target_size_ratio", TARGET_SIZE_RATIO},
{"pg_autoscale_bias", PG_AUTOSCALE_BIAS},
};

typedef std::set<osd_pool_get_choices> choices_set_t;
@@ -5468,6 +5470,7 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
case PG_NUM_MIN:
case TARGET_SIZE_BYTES:
case TARGET_SIZE_RATIO:
+case PG_AUTOSCALE_BIAS:
pool_opts_t::key_t key = pool_opts_t::get_opt_desc(i->first).key;
if (p->opts.is_set(key)) {
if(*it == CSUM_TYPE) {
@@ -5624,6 +5627,7 @@ bool OSDMonitor::preprocess_command(MonOpRequestRef op)
case PG_NUM_MIN:
case TARGET_SIZE_BYTES:
case TARGET_SIZE_RATIO:
+case PG_AUTOSCALE_BIAS:
for (i = ALL_CHOICES.begin(); i != ALL_CHOICES.end(); ++i) {
if (i->second == *it)
break;
@@ -7618,6 +7622,11 @@ int OSDMonitor::prepare_command_pool_set(const cmdmap_t& cmdmap,
<< "Ceph internal implementation restrictions";
return -EINVAL;
}
+} else if (var == "pg_autoscale_bias") {
+if (f < 0.0 || f > 1000.0) {
+ss << "pg_autoscale_bias must be between 0 and 1000";
+return -EINVAL;
+}
}

pool_opts_t::opt_desc_t desc = pool_opts_t::get_opt_desc(var);
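Note the guard above: prepare_command_pool_set parses the value as a double (f) and rejects anything outside [0, 1000]. A pool that never sets the option simply has no pg_autoscale_bias key in its opts, and the autoscaler falls back to a bias of 1.0 (see the module.py hunk below), so existing pools are unaffected.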
4 changes: 3 additions & 1 deletion src/osd/osd_types.cc
@@ -1158,7 +1158,9 @@ static opt_mapping_t opt_mapping = boost::assign::map_list_of
("target_size_bytes", pool_opts_t::opt_desc_t(
pool_opts_t::TARGET_SIZE_BYTES, pool_opts_t::INT))
("target_size_ratio", pool_opts_t::opt_desc_t(
-pool_opts_t::TARGET_SIZE_RATIO, pool_opts_t::DOUBLE));
+pool_opts_t::TARGET_SIZE_RATIO, pool_opts_t::DOUBLE))
+("pg_autoscale_bias", pool_opts_t::opt_desc_t(
+pool_opts_t::PG_AUTOSCALE_BIAS, pool_opts_t::DOUBLE));

bool pool_opts_t::is_opt_name(const std::string& name)
{
1 change: 1 addition & 0 deletions src/osd/osd_types.h
@@ -1019,6 +1019,7 @@ class pool_opts_t {
PG_NUM_MIN, // min pg_num
TARGET_SIZE_BYTES, // total bytes in pool
TARGET_SIZE_RATIO, // fraction of total cluster
+PG_AUTOSCALE_BIAS,
};

enum type_t {
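Together these osd_types changes register pg_autoscale_bias as a DOUBLE-typed pool option keyed by PG_AUTOSCALE_BIAS, which is what lets the generic monitor get/set and display plumbing above carry the value without any bespoke encoding.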
12 changes: 9 additions & 3 deletions src/pybind/mgr/pg_autoscaler/module.py
@@ -109,6 +109,7 @@ def _command_autoscale_status(self, cmd):
table = PrettyTable(['POOL', 'SIZE', 'TARGET SIZE',
'RATE', 'RAW CAPACITY',
'RATIO', 'TARGET RATIO',
+'BIAS',
'PG_NUM',
# 'IDEAL',
'NEW PG_NUM', 'AUTOSCALE'],
@@ -120,6 +121,7 @@ def _command_autoscale_status(self, cmd):
table.align['RAW CAPACITY'] = 'r'
table.align['RATIO'] = 'r'
table.align['TARGET RATIO'] = 'r'
+table.align['BIAS'] = 'r'
table.align['PG_NUM'] = 'r'
# table.align['IDEAL'] = 'r'
table.align['NEW PG_NUM'] = 'r'
@@ -145,6 +147,7 @@ def _command_autoscale_status(self, cmd):
mgr_util.format_bytes(p['subtree_capacity'], 6),
'%.4f' % p['capacity_ratio'],
tr,
+p['bias'],
p['pg_num_target'],
# p['pg_num_ideal'],
final,
@@ -265,6 +268,7 @@ def _get_pool_status(
raw_used_rate = osdmap.pool_raw_used_rate(pool_id)

pool_logical_used = pool_stats[pool_id]['bytes_used']
+bias = p['options'].get('pg_autoscale_bias', 1.0)
target_bytes = p['options'].get('target_size_bytes', 0)

# What proportion of space are we using?
@@ -278,16 +282,17 @@ def _get_pool_status(
final_ratio = max(capacity_ratio, target_ratio)

# So what proportion of pg allowance should we be using?
-pool_pg_target = (final_ratio * root_map[root_id].pg_target) / raw_used_rate
+pool_pg_target = (final_ratio * root_map[root_id].pg_target) / raw_used_rate * bias

final_pg_target = max(p['options'].get('pg_num_min', PG_NUM_MIN),
nearest_power_of_two(pool_pg_target))

self.log.info("Pool '{0}' root_id {1} using {2} of space, "
"pg target {3} quantized to {4} (current {5})".format(
self.log.info("Pool '{0}' root_id {1} using {2} of space, bias {3}, "
"pg target {4} quantized to {5} (current {6})".format(
p['pool_name'],
root_id,
final_ratio,
+bias,
pool_pg_target,
final_pg_target,
p['pg_num_target']
@@ -318,6 +323,7 @@ def _get_pool_status(
'pg_num_ideal': int(pool_pg_target),
'pg_num_final': final_pg_target,
'would_adjust': adjust,
+'bias': p.get('options', {}).get('pg_autoscale_bias', 1.0),
});

return (ret, root_map, pool_root)
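To make the multiplier's effect concrete, here is a minimal sketch of the updated selection step. It is not the module's code: PG_NUM_MIN is assumed to be 32 and nearest_power_of_two is reimplemented as a stand-in, since neither definition appears in this diff; only the "* bias" factor and the max() floor are taken from the hunks above.

    PG_NUM_MIN = 32  # assumed floor; the module's actual constant is not shown in this diff

    def nearest_power_of_two(n):
        # Stand-in for the module's helper: round n to the nearest power of two.
        if n <= 1:
            return 1
        lower = 1 << (int(n).bit_length() - 1)  # largest power of two <= int(n)
        upper = lower * 2
        return upper if (upper - n) < (n - lower) else lower

    def biased_pg_target(final_ratio, root_pg_target, raw_used_rate, bias,
                         pg_num_min=PG_NUM_MIN):
        # Mirrors the change above: bias multiplies the pool's share of the
        # root's pg budget before quantization and the pg_num_min floor.
        pool_pg_target = (final_ratio * root_pg_target) / raw_used_rate * bias
        return max(pg_num_min, nearest_power_of_two(pool_pg_target))

    # A pool occupying 25% of a root budgeted for 3072 pgs at 3x replication:
    print(biased_pg_target(0.25, 3072, 3, 1.0))  # 256
    print(biased_pg_target(0.25, 3072, 3, 4.0))  # 1024

With bias 4.0 the same pool lands on a pg_num four times larger, before the autoscaler decides whether the change is worth applying (the would_adjust flag above).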
