Skip to content

Commit

Permalink
Merge PR ceph#42000 into master
Browse files Browse the repository at this point in the history
* refs/pull/42000/head:
	qa: update rhel kclient to setup container tools
	qa: stop overriding distro for k-testing
	qa: only use RHEL for workload testing
	qa: convert fs:workload to use cephadm
	qa: split fs begin task
	qa/tasks/cephadm: setup CephManager when OSDs are provisioned
	qa/tasks/cephadm: setup file system if MDS are provisioned

Reviewed-by: Sage Weil <sage@redhat.com>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
  • Loading branch information
batrick committed Feb 9, 2022
2 parents 1a06950 + 0fcf892 commit e883dc3
Show file tree
Hide file tree
Showing 56 changed files with 103 additions and 39 deletions.
File renamed without changes.
4 changes: 0 additions & 4 deletions qa/cephfs/begin.yaml → qa/cephfs/begin/0-install.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,3 @@
log-rotate:
ceph-mds: 10G
ceph-osd: 10G
tasks:
- install:
extra_packages:
Expand Down Expand Up @@ -53,7 +50,6 @@ tasks:
- libtool
- libuuid-devel
- xfsprogs-devel
- ceph:
syslog:
ignorelist:
- WARNING*.*check_session_state
Expand Down
5 changes: 5 additions & 0 deletions qa/cephfs/begin/1-ceph.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
log-rotate:
ceph-mds: 10G
ceph-osd: 10G
tasks:
- ceph:
3 changes: 3 additions & 0 deletions qa/cephfs/begin/2-logrotate.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
log-rotate:
ceph-mds: 10G
ceph-osd: 10G

This file was deleted.

This file was deleted.

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/32bits/begin
1 change: 0 additions & 1 deletion qa/suites/fs/32bits/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/bugs/client_trim_caps/begin
1 change: 0 additions & 1 deletion qa/suites/fs/bugs/client_trim_caps/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/full/begin
1 change: 0 additions & 1 deletion qa/suites/fs/full/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/functional/begin
1 change: 0 additions & 1 deletion qa/suites/fs/functional/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/libcephfs/begin
1 change: 0 additions & 1 deletion qa/suites/fs/libcephfs/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/mirror-ha/begin
1 change: 0 additions & 1 deletion qa/suites/fs/mirror-ha/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/mirror/begin
1 change: 0 additions & 1 deletion qa/suites/fs/mirror/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/mixed-clients/begin
1 change: 0 additions & 1 deletion qa/suites/fs/mixed-clients/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/multiclient/begin
1 change: 0 additions & 1 deletion qa/suites/fs/multiclient/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/multifs/begin
1 change: 0 additions & 1 deletion qa/suites/fs/multifs/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/permission/begin
1 change: 0 additions & 1 deletion qa/suites/fs/permission/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/shell/begin
1 change: 0 additions & 1 deletion qa/suites/fs/shell/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/snaps/begin
1 change: 0 additions & 1 deletion qa/suites/fs/snaps/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/thrash/multifs/begin
1 change: 0 additions & 1 deletion qa/suites/fs/thrash/multifs/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/thrash/workloads/begin
1 change: 0 additions & 1 deletion qa/suites/fs/thrash/workloads/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/top/begin
1 change: 0 additions & 1 deletion qa/suites/fs/top/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/traceless/begin
1 change: 0 additions & 1 deletion qa/suites/fs/traceless/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/valgrind/begin
1 change: 0 additions & 1 deletion qa/suites/fs/valgrind/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/verify/begin
1 change: 0 additions & 1 deletion qa/suites/fs/verify/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/volumes/begin
1 change: 0 additions & 1 deletion qa/suites/fs/volumes/begin.yaml

This file was deleted.

1 change: 1 addition & 0 deletions qa/suites/fs/workload/0-rhel_8.yaml
1 change: 0 additions & 1 deletion qa/suites/fs/workload/begin.yaml

This file was deleted.

Empty file added qa/suites/fs/workload/begin/+
Empty file.
1 change: 1 addition & 0 deletions qa/suites/fs/workload/begin/.qa
1 change: 1 addition & 0 deletions qa/suites/fs/workload/begin/0-install.yaml
21 changes: 21 additions & 0 deletions qa/suites/fs/workload/begin/1-cephadm.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
overrides:
ceph:
conf:
osd:
osd shutdown pgref assert: true
tasks:
- cephadm:
roleless: false
- cephadm.shell:
mon.a:
- ceph orch status
- ceph orch ps
- ceph orch ls
- ceph orch host ls
- ceph orch device ls
- cephadm.shell:
mon.a:
- ceph fs dump
- ceph osd dump
- fs.ready:
timeout: 300
1 change: 1 addition & 0 deletions qa/suites/fs/workload/begin/2-logrotate.yaml
1 change: 0 additions & 1 deletion qa/suites/fs/workload/distro

This file was deleted.

60 changes: 50 additions & 10 deletions qa/tasks/cephadm.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import uuid
import yaml

from copy import deepcopy
from io import BytesIO, StringIO
from tarfile import ReadError
from tasks.ceph_manager import CephManager
Expand All @@ -20,6 +21,7 @@
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.config import config as teuth_config
from textwrap import dedent
from tasks.cephfs.filesystem import MDSCluster, Filesystem

# these items we use from ceph.py should probably eventually move elsewhere
from tasks.ceph import get_mons, healthy
Expand Down Expand Up @@ -811,6 +813,16 @@ def ceph_osds(ctx, config):
if int(j.get('num_up_osds', 0)) == num_osds:
break;

if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[cluster_name] = CephManager(
ctx.ceph[cluster_name].bootstrap_remote,
ctx=ctx,
logger=log.getChild('ceph_manager.' + cluster_name),
cluster=cluster_name,
cephadm=True,
)

yield
finally:
pass
Expand Down Expand Up @@ -852,6 +864,43 @@ def ceph_mdss(ctx, config):

yield

@contextlib.contextmanager
def cephfs_setup(ctx, config):
    """Create CephFS filesystem(s) once the MDS daemons are provisioned.

    If any 'mds' roles exist in the cluster, wait for the MDS daemons to
    register as standbys, then create one Filesystem per entry in
    ``config['cephfs']['fs']`` (default: a single fs named 'cephfs').
    Filesystems are destroyed again on teardown.

    :param ctx: teuthology run context
    :param config: task configuration dict; ``cephfs`` sub-dict supplies
                   per-filesystem options, its ``fs`` key a list of dicts
                   each carrying at least ``name``.
    """
    mdss = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))

    # If there are any MDSs, then create a filesystem for them to use.
    # Do this last because it requires the mon cluster to be up and running.
    if len(mdss) > 0:
        log.info('Setting up CephFS filesystem(s)...')
        cephfs_config = config.get('cephfs', {})
        # Pop 'fs' so the remaining cephfs options can be deep-merged into
        # each per-filesystem config below.
        fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
        # Only the first filesystem needs to flip the multifs flag on.
        set_allow_multifs = len(fs_configs) > 1

        # wait for standbys to become available (slow due to valgrind, perhaps)
        mdsc = MDSCluster(ctx)
        with contextutil.safe_while(sleep=2, tries=150) as proceed:
            while proceed():
                if len(mdsc.get_standby_daemons()) >= len(mdss):
                    break

        fss = []
        for fs_config in fs_configs:
            assert isinstance(fs_config, dict)
            name = fs_config.pop('name')
            temp = deepcopy(cephfs_config)
            teuthology.deep_merge(temp, fs_config)
            fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
            if set_allow_multifs:
                fs.set_allow_multifs()
                set_allow_multifs = False
            fss.append(fs)

        try:
            yield
        finally:
            # Tear down even when a nested task raised during the yield,
            # so a failed run does not leak filesystems into later tests.
            # (Previously destroy() was skipped on exception.)
            for fs in fss:
                fs.destroy()
    else:
        yield

@contextlib.contextmanager
def ceph_monitoring(daemon_type, ctx, config):
Expand Down Expand Up @@ -1524,6 +1573,7 @@ def task(ctx, config):
lambda: ceph_mgrs(ctx=ctx, config=config),
lambda: ceph_osds(ctx=ctx, config=config),
lambda: ceph_mdss(ctx=ctx, config=config),
lambda: cephfs_setup(ctx=ctx, config=config),
lambda: ceph_rgw(ctx=ctx, config=config),
lambda: ceph_iscsi(ctx=ctx, config=config),
lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
Expand All @@ -1533,16 +1583,6 @@ def task(ctx, config):
lambda: ceph_clients(ctx=ctx, config=config),
lambda: create_rbd_pool(ctx=ctx, config=config),
):
if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[cluster_name] = CephManager(
ctx.ceph[cluster_name].bootstrap_remote,
ctx=ctx,
logger=log.getChild('ceph_manager.' + cluster_name),
cluster=cluster_name,
cephadm=True,
)

try:
if config.get('wait-for-healthy', True):
healthy(ctx=ctx, config=config)
Expand Down

0 comments on commit e883dc3

Please sign in to comment.