Merge PR #30816 into master
* refs/pull/30816/head:
	qa: use small default pg count for CephFS pools
	*: remove config of "mon pg warn min per osd" in testing

Reviewed-by: Laura Paduano <lpaduano@suse.com>
batrick committed Oct 15, 2019
2 parents 6a2f083 + fc88e6c commit 3ebc546
Showing 6 changed files with 10 additions and 25 deletions.
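
The change itself is small: the QA helper that derived a per-pool PG count from mon_pg_warn_min_per_osd is replaced by a fixed class attribute. A condensed sketch of the before/after behavior (not taken verbatim from the diff below; the parameter names are illustrative):

# Before (sketch of the removed helper): the PG count scaled with the cluster,
# using the configured mon_pg_warn_min_per_osd threshold
# (1 in qa/tasks/ceph.conf.template, 3 in vstart.sh).
def get_pgs_per_fs_pool(pg_warn_min_per_osd, osd_count):
    return pg_warn_min_per_osd * osd_count

# After: a small fixed default. On Octopus+ the PG count could be omitted to
# use the cluster default, but 8 keeps Mimic/Nautilus deployments working.
pgs_per_fs_pool = 8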
1 change: 0 additions & 1 deletion qa/tasks/ceph.conf.template
@@ -13,7 +13,6 @@
 ms die on old message = true
 ms die on bug = true
 
-mon pg warn min per osd = 1
 mon max pg per osd = 10000 # >= luminous
 mon pg warn max object skew = 0
 
23 changes: 7 additions & 16 deletions qa/tasks/cephfs/filesystem.py
@@ -515,16 +515,9 @@ def set_allow_standby_replay(self, yes):
 def set_allow_new_snaps(self, yes):
 self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
 
-def get_pgs_per_fs_pool(self):
-"""
-Calculate how many PGs to use when creating a pool, in order to avoid raising any
-health warnings about mon_pg_warn_min_per_osd
-
-:return: an integer number of PGs
-"""
-pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
-osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-return pg_warn_min_per_osd * osd_count
+# In Octopus+, the PG count can be omitted to use the default. We keep the
+# hard-coded value for deployments of Mimic/Nautilus.
+pgs_per_fs_pool = 8
 
 def create(self):
 if self.name is None:
@@ -538,10 +531,8 @@ def create(self):
 
 log.info("Creating filesystem '{0}'".format(self.name))
 
-pgs_per_fs_pool = self.get_pgs_per_fs_pool()
-
 self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-self.metadata_pool_name, pgs_per_fs_pool.__str__())
+self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
 if self.metadata_overlay:
 self.mon_manager.raw_cluster_cmd('fs', 'new',
 self.name, self.metadata_pool_name, data_pool_name,
@@ -554,15 +545,15 @@
 self.mon_manager.raw_cluster_cmd(*cmd)
 self.mon_manager.raw_cluster_cmd(
 'osd', 'pool', 'create',
-data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure',
 data_pool_name)
 self.mon_manager.raw_cluster_cmd(
 'osd', 'pool', 'set',
 data_pool_name, 'allow_ec_overwrites', 'true')
 else:
 self.mon_manager.raw_cluster_cmd(
 'osd', 'pool', 'create',
-data_pool_name, pgs_per_fs_pool.__str__())
+data_pool_name, self.pgs_per_fs_pool.__str__())
 self.mon_manager.raw_cluster_cmd('fs', 'new',
 self.name, self.metadata_pool_name, data_pool_name)
 self.check_pool_application(self.metadata_pool_name)
@@ -634,7 +625,7 @@ def get_var(self, var, status=None):
 return self.get_mds_map(status=status)[var]
 
 def add_data_pool(self, name):
-self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
+self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
 self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
 self.get_pool_names(refresh = True)
 for poolid, fs_name in self.data_pools.items():
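
For context, a rough usage sketch of the updated helper (illustrative only; the make_test_fs wrapper and the 'extra_data' pool name are assumptions, not part of this commit):

# Illustrative only: how QA code picks up the fixed PG count after this change.
from tasks.cephfs.filesystem import Filesystem

def make_test_fs(ctx):
    fs = Filesystem(ctx)            # ctx: teuthology run context
    fs.create()                     # metadata and data pools are created with pg_num = 8
    fs.add_data_pool('extra_data')  # additional data pools also get pg_num = 8
    assert fs.pgs_per_fs_pool == 8  # no longer derived from mon_pg_warn_min_per_osd
    return fs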
2 changes: 1 addition & 1 deletion qa/tasks/cephfs/test_data_scan.py
@@ -534,7 +534,7 @@ def test_pg_files(self):
 pgs_to_files[pgid].append(file_path)
 log.info("{0}: {1}".format(file_path, pgid))
 
-pg_count = self.fs.get_pgs_per_fs_pool()
+pg_count = self.fs.pgs_per_fs_pool
 for pg_n in range(0, pg_count):
 pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
 out = self.fs.data_scan(["pg_files", "mydir", pg_str])
4 changes: 2 additions & 2 deletions qa/tasks/cephfs/test_misc.py
@@ -68,7 +68,7 @@ def test_fs_new(self):
 '--yes-i-really-really-mean-it')
 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
 self.fs.metadata_pool_name,
-self.fs.get_pgs_per_fs_pool().__str__())
+self.fs.pgs_per_fs_pool.__str__())
 
 dummyfile = '/etc/fstab'
 
@@ -105,7 +105,7 @@ def get_pool_df(fs, name):
 '--yes-i-really-really-mean-it')
 self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
 self.fs.metadata_pool_name,
-self.fs.get_pgs_per_fs_pool().__str__())
+self.fs.pgs_per_fs_pool.__str__())
 self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
 self.fs.metadata_pool_name,
 data_pool_name)
4 changes: 0 additions & 4 deletions qa/tasks/vstart_runner.py
@@ -1078,10 +1078,6 @@ def _prefix(self):
 def set_clients_block(self, blocked, mds_id=None):
 raise NotImplementedError()
 
-def get_pgs_per_fs_pool(self):
-# FIXME: assuming there are 3 OSDs
-return 3 * int(self.get_config('mon_pg_warn_min_per_osd'))
-
 
 class InteractiveFailureResult(unittest.TextTestResult):
 """
1 change: 0 additions & 1 deletion src/vstart.sh
@@ -1156,7 +1156,6 @@ if [ $CEPH_NUM_MON -gt 0 ]; then
 [global]
 osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
 osd_pool_default_min_size = 1
-mon_pg_warn_min_per_osd = 3
 [mon]
 mon_osd_reporter_subtree_level = osd
