Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

qa: snap replicator tests #37490

Closed
wants to merge 8 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions ceph.spec.in
Original file line number Diff line number Diff line change
Expand Up @@ -1663,6 +1663,7 @@ fi
%{_datadir}/ceph/mgr/iostat
%{_datadir}/ceph/mgr/localpool
%{_datadir}/ceph/mgr/mds_autoscaler
%{_datadir}/ceph/mgr/mirroring
%{_datadir}/ceph/mgr/orchestrator
%{_datadir}/ceph/mgr/osd_perf_query
%{_datadir}/ceph/mgr/osd_support
Expand Down
1 change: 1 addition & 0 deletions debian/ceph-mgr-modules-core.install
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ usr/share/ceph/mgr/influx
usr/share/ceph/mgr/insights
usr/share/ceph/mgr/iostat
usr/share/ceph/mgr/localpool
usr/share/ceph/mgr/mirroring
usr/share/ceph/mgr/orchestrator
usr/share/ceph/mgr/osd_perf_query
usr/share/ceph/mgr/osd_support
Expand Down
Empty file added qa/suites/fs/mirror/%
Empty file.
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/.qa
6 changes: 6 additions & 0 deletions qa/suites/fs/mirror/base/install.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Install the cephfs-mirror package on top of the base install, then
# bootstrap a Ceph cluster.
meta:
- desc: install cephfs-mirror package in cluster
tasks:
- install:
    # cephfs-mirror is not part of the default package set
    extra_packages: [cephfs-mirror]
- ceph:
Empty file.
5 changes: 5 additions & 0 deletions qa/suites/fs/mirror/cephfs-mirror/one-per-cluster.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
meta:
- desc: run one cephfs-mirror daemon on primary cluster
tasks:
# Start a single cephfs-mirror daemon, authenticating as the dedicated
# mirror client set up by clients/mirror.yaml.
- cephfs-mirror:
    client: client.mirror
Empty file added qa/suites/fs/mirror/clients/+
Empty file.
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/clients/.qa
18 changes: 18 additions & 0 deletions qa/suites/fs/mirror/clients/mirror.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
meta:
- desc: configure the permissions for client.mirror
overrides:
  ceph:
    conf:
      client:
        debug cephfs_mirror: 20
        log to stderr: false
      # make these predictable
      client.mirror:
        admin socket: /var/run/ceph/cephfs-mirror.asok
        pid file: /var/run/ceph/cephfs-mirror.pid
tasks:
- exec:
    # Restrict the mirror daemon's client to read-only mon/mds access plus
    # rw only on its own cephfs_mirror-prefixed objects; the remote-side
    # client needs full mds/osd access to receive mirrored data.
    client.mirror:
    - "sudo ceph auth caps client.mirror mon 'allow r' mds 'allow r' osd 'allow rw object_prefix cephfs_mirror' mgr 'allow r'"
    client.mirror_remote:
    - "sudo ceph auth caps client.mirror_remote mon 'allow r' mds 'allow all' osd 'allow rw' mgr 'allow r'"
Empty file added qa/suites/fs/mirror/cluster/+
Empty file.
16 changes: 16 additions & 0 deletions qa/suites/fs/mirror/cluster/1-node.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
meta:
- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 5 mdss
# Single-node cluster: one mon/mgr, five MDS daemons (spares allow a
# second filesystem), three OSDs, one test client plus the two mirror
# clients whose caps are configured in clients/mirror.yaml.
roles:
- - mon.a
  - mgr.x
  - mds.a
  - mds.b
  - mds.c
  - mds.d
  - mds.e
  - osd.0
  - osd.1
  - osd.2
  - client.0
  - client.mirror
  - client.mirror_remote
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/mount/.qa
7 changes: 7 additions & 0 deletions qa/suites/fs/mirror/mount/fuse.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
tasks:
# Mount the primary filesystem for client.0, and create + mount a second
# filesystem ("mirror_fs") for the mirror-remote client.
- ceph-fuse: [client.0]
- ceph-fuse:
    client.mirror_remote:
      newfs: true
      name: mirror_fs
      auth_id: mirror_remote
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/objectstore
Empty file added qa/suites/fs/mirror/overrides/+
Empty file.
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/overrides/.qa
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/overrides/whitelist_health.yaml
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/supported-random-distros$
1 change: 1 addition & 0 deletions qa/suites/fs/mirror/tasks/.qa
10 changes: 10 additions & 0 deletions qa/suites/fs/mirror/tasks/mirror.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Run the cephfs mirroring test module with extra mgr-side client debugging.
overrides:
  ceph:
    conf:
      mgr:
        debug client: 10

tasks:
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_mirroring
3 changes: 2 additions & 1 deletion qa/tasks/ceph_fuse.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,8 @@ def task(ctx, config):
if id_ not in all_mounts:
fuse_mount = FuseMount(ctx=ctx, client_config=client_config,
test_dir=testdir, client_id=auth_id,
client_remote=remote, brxnet=brxnet)
client_remote=remote, brxnet=brxnet,
cephfs_name=client_config.get("name", None))
all_mounts[id_] = fuse_mount
else:
# Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
Expand Down
6 changes: 5 additions & 1 deletion qa/tasks/cephfs/cephfs_test_case.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,10 @@ class CephFSTestCase(CephTestCase):
# requires REQUIRE_FILESYSTEM = True
REQUIRE_RECOVERY_FILESYSTEM = False

# create a backup filesystem if required.
# requires REQUIRE_FILESYSTEM = True
REQUIRE_BACKUP_FILESYSTEM = False

LOAD_SETTINGS = [] # type: ignore

def _save_mount_details(self):
Expand Down Expand Up @@ -154,7 +158,7 @@ def setUp(self):
# test, delete them
for entry in self.auth_list():
ent_type, ent_id = entry['entity'].split(".")
if ent_type == "client" and ent_id not in client_mount_ids and ent_id != "admin":
if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])

if self.REQUIRE_FILESYSTEM:
Expand Down
2 changes: 1 addition & 1 deletion qa/tasks/cephfs/fuse_mount.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def mount(self, mntopts=[], createfs=True, check_status=True, **kwargs):
# TODO: don't call setupfs() from within mount(), since it's
# absurd. The proper order should be: create FS first and then
# call mount().
self.setupfs(name=self.cephfs_name)
self.setupfs(name=self.cephfs_name, create=True)

try:
return self._mount(mntopts, check_status)
Expand Down
7 changes: 5 additions & 2 deletions qa/tasks/cephfs/mount.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,11 +150,14 @@ def assert_and_log_minimum_mount_details(self):
def is_mounted(self):
    """Return the mount's current mounted-state flag (``self.mounted``)."""
    return self.mounted

def setupfs(self, name=None, create=False):
    """Initialise ``self.fs`` with the Filesystem this mount talks to.

    :param name: filesystem name; when None and a previous Filesystem
                 object exists, its name is reused.
    :param create: passed through to ``Filesystem()`` so the filesystem
                   can be created if it does not already exist.
    """
    if name is None and self.fs is not None:
        # Previous mount existed, reuse the old name
        name = self.fs.name
    self.fs = Filesystem(self.ctx, name=name, create=create)
    # Permit multiple filesystems in the cluster -- presumably needed by
    # tests that create a secondary (e.g. mirror backup) filesystem.
    self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
                                        'enable_multiple', 'true',
                                        '--yes-i-really-mean-it')
    log.info('Wait for MDS to reach steady state...')
    self.fs.wait_for_daemons()
    log.info('Ready to start {}...'.format(type(self).__name__))
Expand Down