Merge pull request #16429 from liewegas/wip-jewel-x
qa/suites/upgrade/jewel-x: misc fixes for new health checks
liewegas committed Jul 20, 2017
2 parents ed48ed1 + 7102de8 commit 27e8d75
Showing 5 changed files with 47 additions and 24 deletions.
4 changes: 4 additions & 0 deletions qa/suites/upgrade/jewel-x/parallel/1-jewel-install/jewel.yaml
@@ -11,6 +11,10 @@ tasks:
 - ceph:
     skip_mgr_daemons: true
     add_osds_to_crush: true
+    log-whitelist:
+    - overall HEALTH_
+    - (FS_
+    - (MDS_
 - print: "**** done ceph"
 - install.upgrade:
     mon.a:
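A note on the whitelist entries above: luminous replaced jewel's free-form health summary with named health checks (FS_DEGRADED, MDS_ALL_DOWN, and so on), which are echoed to the cluster log as "overall HEALTH_..." and "Health check failed: ... (CHECK_NAME)" lines. The teuthology run scrapes the cluster log for warning/error lines and fails on any that are not whitelisted, and these states are expected while a jewel cluster is mid-upgrade. The sketch below illustrates the matching behavior under the assumption that whitelist entries are applied as regular expressions; it is not teuthology's actual scraping code. Literal parentheses are escaped in the sketch because a bare '(' is not a valid regex.

import re

# Illustrative whitelist mirroring the yaml entries above; escaped parens
# because these are treated as regex patterns in this sketch.
whitelist = [r'overall HEALTH_', r'\(FS_', r'\(MDS_']

def is_whitelisted(line):
    # A pattern may match anywhere in the line, so the prefix
    # 'overall HEALTH_' covers both HEALTH_WARN and HEALTH_ERR summaries.
    return any(re.search(pattern, line) for pattern in whitelist)

assert is_whitelisted('overall HEALTH_WARN 1 filesystem is degraded')
assert is_whitelisted('Health check failed: 1 filesystem is degraded (FS_DEGRADED)')
assert not is_whitelisted('Health check failed: 1 osds down (OSD_DOWN)')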
@@ -80,6 +80,9 @@ tasks:
 - workload_x
 - upgrade-sequence_x
 - print: "**** done parallel -x branch"
+- exec:
+    osd.0:
+    - ceph osd set-require-min-compat-client luminous
 # Run librados tests on the -x upgraded cluster
 - install.upgrade:
     client.1:
@@ -221,6 +224,5 @@ upgrade-sequence_x:
 - exec:
     osd.0:
     - ceph osd require-osd-release luminous
-    - ceph osd set-require-min-compat-client luminous
 - ceph.healthy:
 - print: "**** done ceph.restart all -x branch mds/osd/mon"
61 changes: 38 additions & 23 deletions qa/tasks/ceph.py
@@ -1110,13 +1110,17 @@ def run_daemon(ctx, config, type_):
     if config.get('coverage') or config.get('valgrind') is not None:
         daemon_signal = 'term'
 
+    # create osds in order. (this only matters for pre-luminous, which might
+    # be hammer, which doesn't take an id_ argument to legacy 'osd create').
+    osd_uuids = {}
     for remote, roles_for_host in daemons.remotes.iteritems():
         is_type_ = teuthology.is_type(type_, cluster_name)
         for role in roles_for_host:
             if not is_type_(role):
                 continue
             _, _, id_ = teuthology.split_role(role)
 
+
             if type_ == 'osd':
                 datadir='/var/lib/ceph/osd/{cluster}-{id}'.format(
                     cluster=cluster_name, id=id_)
@@ -1125,29 +1129,40 @@ def run_daemon(ctx, config, type_):
                     path=datadir + '/fsid',
                     sudo=True,
                 ).strip()
-                try:
-                    remote.run(
-                        args=[
-                            'sudo', 'ceph', '--cluster', cluster_name,
-                            'osd', 'new', osd_uuid, id_,
-                        ]
-                    )
-                except:
-                    # fallback to pre-luminous (hammer or jewel)
-                    remote.run(
-                        args=[
-                            'sudo', 'ceph', '--cluster', cluster_name,
-                            'osd', 'create', osd_uuid,
-                        ]
-                    )
-                if config.get('add_osds_to_crush'):
-                    remote.run(
-                        args=[
-                            'sudo', 'ceph', '--cluster', cluster_name,
-                            'osd', 'crush', 'create-or-move', 'osd.' + id_,
-                            '1.0', 'host=localhost', 'root=default',
-                        ]
-                    )
+                osd_uuids[id_] = osd_uuid
+    for osd_id in range(len(osd_uuids)):
+        id_ = str(osd_id)
+        osd_uuid = osd_uuids.get(id_)
+        try:
+            remote.run(
+                args=[
+                    'sudo', 'ceph', '--cluster', cluster_name,
+                    'osd', 'new', osd_uuid, id_,
+                ]
+            )
+        except:
+            # fallback to pre-luminous (hammer or jewel)
+            remote.run(
+                args=[
+                    'sudo', 'ceph', '--cluster', cluster_name,
+                    'osd', 'create', osd_uuid,
+                ]
+            )
+        if config.get('add_osds_to_crush'):
+            remote.run(
+                args=[
+                    'sudo', 'ceph', '--cluster', cluster_name,
+                    'osd', 'crush', 'create-or-move', 'osd.' + id_,
+                    '1.0', 'host=localhost', 'root=default',
+                ]
+            )
+
+    for remote, roles_for_host in daemons.remotes.iteritems():
+        is_type_ = teuthology.is_type(type_, cluster_name)
+        for role in roles_for_host:
+            if not is_type_(role):
+                continue
+            _, _, id_ = teuthology.split_role(role)
 
             run_cmd = [
                 'sudo',
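The ceph.py change splits OSD creation out of the daemon-start loop: the first loop now only collects each data directory's fsid into osd_uuids, a new middle loop issues the create calls in ascending ID order, and a second per-remote loop starts the daemons. As the added comment notes, this ordering only matters for pre-luminous monitors, whose legacy 'osd create' ignores any intended ID and simply allocates the next free one, so creating OSDs in whatever order daemons.remotes happens to iterate could hand osd.1's data directory the ID 0. A toy sketch of the failure mode, with a fake allocator standing in for a pre-luminous monitor (names are illustrative only):

# Toy model of a pre-luminous monitor: 'osd create' ignores the caller's
# intended ID and just hands out the next free one.
def make_legacy_allocator():
    state = {'next_id': 0}
    def osd_create(uuid):
        osd_id, state['next_id'] = state['next_id'], state['next_id'] + 1
        return osd_id
    return osd_create

# Intended mapping, keyed by the ID baked into each data directory.
osd_uuids = {'1': 'uuid-b', '0': 'uuid-a', '2': 'uuid-c'}

# Unordered creation (old code path): iteration order decides which uuid
# gets which ID, so uuid-b may end up registered as osd.0.
create = make_legacy_allocator()
unordered = {uuid: create(uuid) for uuid in osd_uuids.values()}

# Ordered creation (new code path): walk IDs 0..n-1 so the allocator's
# sequential IDs line up with the intended ones.
create = make_legacy_allocator()
ordered = {osd_uuids[str(i)]: create(osd_uuids[str(i)])
           for i in range(len(osd_uuids))}
assert ordered == {'uuid-a': 0, 'uuid-b': 1, 'uuid-c': 2}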
