Skip to content

Commit

Permalink
qa: add tests for adding EC data pools
Browse files Browse the repository at this point in the history
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
  • Loading branch information
batrick committed Dec 20, 2019
1 parent 3e0aee5 commit bf0cf8e
Show file tree
Hide file tree
Showing 2 changed files with 125 additions and 2 deletions.
33 changes: 31 additions & 2 deletions qa/tasks/cephfs/filesystem.py
Expand Up @@ -25,6 +25,30 @@
DAEMON_WAIT_TIMEOUT = 120
ROOT_INO = 1

class FileLayout(object):
    """Value object describing a CephFS file/directory layout.

    Each attribute corresponds to one ``ceph.dir.layout.*`` /
    ``ceph.file.layout.*`` xattr field; attributes left as ``None`` are
    omitted from :meth:`items`.
    """

    def __init__(self, pool=None, pool_namespace=None, stripe_unit=None,
                 stripe_count=None, object_size=None):
        self.pool = pool
        self.pool_namespace = pool_namespace
        self.stripe_unit = stripe_unit
        self.stripe_count = stripe_count
        self.object_size = object_size

    @classmethod
    def load_from_ceph(cls, layout_str):
        """Parse a layout string as reported by Ceph into a FileLayout.

        Fix: a classmethod receives the class as its first argument; the
        original signature had no ``cls``, so the class object would have
        been bound to ``layout_str``.
        """
        # TODO
        pass

    def items(self):
        """Yield ``(field_name, value)`` pairs for every attribute that is set.

        NOTE: pool_namespace is skipped when falsy (e.g. ``""``), not only
        when None — preserved from the original behavior.
        """
        if self.pool is not None:
            yield ("pool", self.pool)
        if self.pool_namespace:
            yield ("pool_namespace", self.pool_namespace)
        if self.stripe_unit is not None:
            yield ("stripe_unit", self.stripe_unit)
        if self.stripe_count is not None:
            yield ("stripe_count", self.stripe_count)
        if self.object_size is not None:
            # Fix: was `self.stripe_size`, an attribute that is never set
            # anywhere in this class (AttributeError at runtime).
            yield ("object_size", self.object_size)

class ObjectNotFound(Exception):
def __init__(self, object_name):
Expand Down Expand Up @@ -630,8 +654,13 @@ def get_mds_map(self, status=None):
def get_var(self, var, status=None):
    """Return a single field of the MDS map (optionally from a pre-fetched status)."""
    mdsmap = self.get_mds_map(status=status)
    return mdsmap[var]

def add_data_pool(self, name):
self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
def set_dir_layout(self, mount, path, layout):
    """Apply a FileLayout to *path* by setting ceph.dir.layout.* xattrs on the mount."""
    for field, value in layout.items():
        xattr_name = "ceph.dir.layout." + field
        cmd = ["setfattr", "-n", xattr_name, "-v", str(value), path]
        mount.run_shell(args=cmd)

def add_data_pool(self, name, create=True):
if create:
self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
self.get_pool_names(refresh = True)
for poolid, fs_name in self.data_pools.items():
Expand Down
94 changes: 94 additions & 0 deletions qa/tasks/cephfs/test_admin.py
@@ -1,7 +1,10 @@
from teuthology.orchestra.run import CommandFailedError

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount

from tasks.cephfs.filesystem import FileLayout

class TestAdminCommands(CephFSTestCase):
"""
Tests for administration command.
Expand All @@ -18,6 +21,97 @@ def test_fs_status(self):
s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
self.assertTrue("active" in s)

def _setup_ec_pools(self, n, metadata=True, overwrites=True):
    """Create an EC data pool named *n*-data (and optionally a replicated
    metadata pool *n*-meta), using a 2+2 profile with an osd failure domain.
    """
    mon = self.fs.mon_manager
    if metadata:
        mon.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
    profile = ['osd', 'erasure-code-profile', 'set', n+"-profile",
               "m=2", "k=2", "crush-failure-domain=osd"]
    mon.raw_cluster_cmd(*profile)
    mon.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8",
                        "erasure", n+"-profile")
    if overwrites:
        mon.raw_cluster_cmd('osd', 'pool', 'set', n+"-data",
                            'allow_ec_overwrites', 'true')

def test_add_data_pool_root(self):
    """
    That a new data pool can be added and used for the root directory.
    """
    pool = self.fs.add_data_pool("foo")
    layout = FileLayout(pool=pool)
    self.fs.set_dir_layout(self.mount_a, ".", layout)

def test_add_data_pool_subdir(self):
    """
    That a new data pool can be added and used for a sub-directory.
    """
    pool = self.fs.add_data_pool("foo")
    self.mount_a.run_shell("mkdir subdir")
    layout = FileLayout(pool=pool)
    self.fs.set_dir_layout(self.mount_a, "subdir", layout)

def test_add_data_pool_ec(self):
    """
    That a new EC data pool can be added.
    """

    n = "test_add_data_pool_ec"
    self._setup_ec_pools(n, metadata=False)
    # The pool was already created by _setup_ec_pools, so only attach it.
    # (Dropped the unused `p =` binding the original had.)
    self.fs.add_data_pool(n+"-data", create=False)

def test_new_default_ec(self):
    """
    That a new file system warns/fails with an EC default data pool.
    """

    self.fs.delete_all_filesystems()
    n = "test_new_default_ec"
    self._setup_ec_pools(n)
    try:
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
    except CommandFailedError as e:
        # 22 == EINVAL: mon rejects an EC default data pool without --force.
        # Use unittest assertions instead of hand-rolled pass/raise so a
        # wrong exit status is reported as a test failure with both values.
        self.assertEqual(e.exitstatus, 22)
    else:
        self.fail("expected 'fs new' to fail with an EC default data pool")

def test_new_default_ec_force(self):
    """
    That a new file system succeeds with an EC default data pool with --force.
    """
    self.fs.delete_all_filesystems()
    name = "test_new_default_ec_force"
    self._setup_ec_pools(name)
    args = ['fs', 'new', name, name+"-meta", name+"-data", "--force"]
    self.fs.mon_manager.raw_cluster_cmd(*args)

def test_new_default_ec_no_overwrite(self):
    """
    That a new file system fails with an EC default data pool without overwrite.
    """

    self.fs.delete_all_filesystems()
    n = "test_new_default_ec_no_overwrite"
    self._setup_ec_pools(n, overwrites=False)
    try:
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
    except CommandFailedError as e:
        # 22 == EINVAL: EC pool without allow_ec_overwrites is rejected.
        self.assertEqual(e.exitstatus, 22)
    else:
        self.fail("expected 'fs new' to fail without allow_ec_overwrites")
    # and even with --force !
    try:
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
    except CommandFailedError as e:
        self.assertEqual(e.exitstatus, 22)  # EINVAL
    else:
        self.fail("expected 'fs new --force' to fail without allow_ec_overwrites")

class TestConfigCommands(CephFSTestCase):
"""
Test that daemons and clients respond to the otherwise rarely-used
Expand Down

0 comments on commit bf0cf8e

Please sign in to comment.