Merge pull request #14692 from smithfarm/wip-fix-hammer-jewel-x
qa/suites/upgrade: add tiering test to hammer-jewel-x

Reviewed-by: Kefu Chai <kchai@redhat.com>
smithfarm committed Apr 26, 2017
2 parents 557fee8 + 8d0c229 commit 0d6953e
Showing 8 changed files with 101 additions and 0 deletions.
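The files below are teuthology suite fragments: the scheduler combines one file from each numbered directory into a single job config, so the cluster layout, install/upgrade steps, and tiering workload are composed per job. As a rough sketch of how such a run might be scheduled (flag values are assumptions from a typical teuthology setup and may differ by lab and version):

    # hypothetical scheduling example, not part of this PR
    teuthology-suite --suite upgrade/hammer-jewel-x \
        --filter tiering \
        --ceph master \
        --machine-type smithi \
        --email qa@example.com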
Empty file.
16 changes: 16 additions & 0 deletions qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,16 @@
overrides:
  ceph:
    conf:
      mon:
        mon warn on legacy crush tunables: false
    log-whitelist:
    - wrongly marked me down
roles:
- - mon.a
  - osd.0
  - osd.1
- - mon.b
  - mon.c
  - osd.2
  - osd.3
- - client.0
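The roles stanza spreads the daemons over three test nodes: mon.a with osd.0-1, mon.b and mon.c with osd.2-3, and a separate client node. A quick way to sanity-check that layout once the cluster is up, shown here as an illustrative aside rather than part of the suite:

    ceph mon stat   # expect three monitors: a, b, c
    ceph osd tree   # expect osd.0-1 on one host and osd.2-3 on another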
@@ -0,0 +1,11 @@
tasks:
- install:
    branch: hammer
    exclude_packages:
    - ceph-mgr
    - libcephfs2
    - libcephfs-devel
    - libcephfs-dev
- print: '**** done hammer'
- ceph:
    fs: xfs
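The exclude_packages list keeps the installer from pulling in packages that only exist in later releases (ceph-mgr and the libcephfs2 family postdate hammer and jewel), which would otherwise break the hammer install. An illustrative check of the installed release before the upgrade steps run, not part of the fragment:

    ceph --version            # expect a 0.94.x (hammer) version string
    ceph tell osd.0 version   # version the OSD daemon itself reports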
Empty file.
@@ -0,0 +1,5 @@
tasks:
- exec:
    client.0:
    - ceph osd erasure-code-profile set t-profile ruleset-failure-domain=osd k=2 m=1
    - ceph osd pool create base-pool 4 4 erasure t-profile
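The t-profile gives k=2 data chunks plus m=1 coding chunk with an OSD-level failure domain, so base-pool tolerates the loss of any single OSD while the cache tier added in a later fragment fronts it. To inspect what this exec step created (an illustrative check, not part of the suite):

    ceph osd erasure-code-profile get t-profile        # k=2 m=1 ruleset-failure-domain=osd
    ceph osd pool get base-pool erasure_code_profile   # confirms the pool uses t-profile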
@@ -0,0 +1,4 @@
tasks:
- exec:
    client.0:
    - ceph osd pool create base-pool 4
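This fragment and the erasure-coded one above are alternatives for the same step: teuthology generates one job per combination, so some jobs exercise the tiering workload over a replicated base-pool and others over the EC base-pool. Either way, the resulting pool can be inspected with (illustration only):

    ceph osd dump | grep base-pool   # pool line shows replicated vs. erasure and the crush rule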
@@ -0,0 +1,14 @@
overrides:
  ceph:
    log-whitelist:
    - must scrub before tier agent can activate
tasks:
- exec:
    client.0:
    - ceph osd pool create cache-pool 4
    - ceph osd tier add base-pool cache-pool
    - ceph osd tier cache-mode cache-pool writeback
    - ceph osd tier set-overlay base-pool cache-pool
    - ceph osd pool set cache-pool hit_set_type bloom
    - ceph osd pool set cache-pool hit_set_count 8
    - ceph osd pool set cache-pool hit_set_period 5
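The writeback cache tier uses a bloom-filter hit set; hit_set_count 8 with hit_set_period 5 keeps eight 5-second hit sets, presumably chosen so the tier agent has recent hit data to act on within a short test run, and the "must scrub before tier agent can activate" warning is whitelisted because it is expected while the tier warms up. An illustrative way to read the settings back after this exec step:

    ceph osd pool get cache-pool hit_set_type     # bloom
    ceph osd pool get cache-pool hit_set_count    # 8
    ceph osd pool get cache-pool hit_set_period   # 5
    ceph osd dump | grep cache-pool               # shows cache_mode writeback and the overlay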
51 changes: 51 additions & 0 deletions qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,51 @@
tasks:
- parallel:
  - workload
  - upgrade-sequence
- print: "**** done parallel"

workload:
  sequential:
  - rados:
      clients: [client.0]
      pools: [base-pool]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        copy_from: 50
        flush: 50
        try_flush: 50
        evict: 50
  - print: "**** done rados"

upgrade-sequence:
  sequential:
  - install.upgrade:
      exclude_packages:
      - ceph-mgr
      - libcephfs2
      - libcephfs-devel
      - libcephfs-dev
      osd.0:
        branch: jewel
      osd.2:
        branch: jewel
  - print: "*** done install.upgrade osd.0 and osd.2"
  - ceph.restart:
      daemons: [osd.0, osd.1, osd.2, osd.3]
      wait-for-healthy: false
      wait-for-osds-up: true
  - ceph.restart:
      daemons: [mon.a, mon.b, mon.c]
      wait-for-healthy: false
      wait-for-osds-up: true
  - print: "**** done ceph.restart do not wait for healthy"
  - exec:
      mon.a:
      - sleep 300 # http://tracker.ceph.com/issues/17808
      - ceph osd set require_jewel_osds
  - ceph.healthy:
  - print: "**** done ceph.healthy"
