diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/% b/qa/suites/upgrade/hammer-jewel-x/tiering/%
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
new file mode 100644
index 0000000000000..f4fad2960e2df
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/0-cluster/start.yaml
@@ -0,0 +1,16 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+    log-whitelist:
+    - wrongly marked me down
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
new file mode 100644
index 0000000000000..f04be6929c17a
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+    branch: hammer
+    exclude_packages:
+    - ceph-mgr
+    - libcephfs2
+    - libcephfs-devel
+    - libcephfs-dev
+- print: '**** done hammer'
+- ceph:
+    fs: xfs
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/% b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/%
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
new file mode 100644
index 0000000000000..9ab479d39bbb7
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml
@@ -0,0 +1,5 @@
+tasks:
+- exec:
+    client.0:
+    - ceph osd erasure-code-profile set t-profile ruleset-failure-domain=osd k=2 m=1
+    - ceph osd pool create base-pool 4 4 erasure t-profile
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
new file mode 100644
index 0000000000000..5a1358149f770
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    client.0:
+    - ceph osd pool create base-pool 4
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
new file mode 100644
index 0000000000000..d9cc3489e3b6c
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/2-setup-cache-tiering/1-create-cache-tier.yaml
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    log-whitelist:
+    - must scrub before tier agent can activate
+tasks:
+- exec:
+    client.0:
+    - ceph osd pool create cache-pool 4
+    - ceph osd tier add base-pool cache-pool
+    - ceph osd tier cache-mode cache-pool writeback
+    - ceph osd tier set-overlay base-pool cache-pool
+    - ceph osd pool set cache-pool hit_set_type bloom
+    - ceph osd pool set cache-pool hit_set_count 8
+    - ceph osd pool set cache-pool hit_set_period 5
diff --git a/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml b/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
new file mode 100644
index 0000000000000..c58c62c2b23d3
--- /dev/null
+++ b/qa/suites/upgrade/hammer-jewel-x/tiering/3-upgrade.yaml
@@ -0,0 +1,51 @@
+tasks:
+- parallel:
+  - workload
+  - upgrade-sequence
+- print: "**** done parallel"
+
+workload:
+  sequential:
+  - rados:
+      clients: [client.0]
+      pools: [base-pool]
+      ops: 4000
+      objects: 500
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        copy_from: 50
+        flush: 50
+        try_flush: 50
+        evict: 50
+  - print: "**** done rados"
+
+upgrade-sequence:
+  sequential:
+  - install.upgrade:
+      exclude_packages:
+      - ceph-mgr
+      - libcephfs2
+      - libcephfs-devel
+      - libcephfs-dev
+      osd.0:
+        branch: jewel
+      osd.2:
+        branch: jewel
+  - print: "*** done install.upgrade osd.0 and osd.2"
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart do not wait for healthy"
+  - exec:
+      mon.a:
+      - sleep 300 # http://tracker.ceph.com/issues/17808
+      - ceph osd set require_jewel_osds
+  - ceph.healthy:
+  - print: "**** done ceph.healthy"
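For context on how these fragments turn into jobs: the '%' files mark convolution points, so the two alternatives under 2-setup-cache-tiering/0-create-base-tier (replicated vs. erasure-coded base pool) each yield one job, with the selected YAML fragments merged in order before the suite runs. The sketch below is only a rough illustration of that fan-out and merge, not teuthology's actual matrix builder; it assumes PyYAML is installed and that it is run from the tiering/ directory, and the facet list is spelled out by hand rather than discovered from disk.

#!/usr/bin/env python3
# Rough sketch (not teuthology) of the '%' convolution: cross the facet
# alternatives and merge the chosen YAML fragments into one job config.
import itertools
import yaml  # PyYAML, assumed available


def merge(base, extra):
    """Merge one fragment into the accumulated job config: nested dicts
    merge recursively, lists concatenate, scalars from 'extra' override."""
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge(base[key], value)
        elif isinstance(value, list) and isinstance(base.get(key), list):
            base[key] = base[key] + value
        else:
            base[key] = value
    return base


# One entry per facet; an entry with several paths fans out into one job each.
facets = [
    ['0-cluster/start.yaml'],
    ['1-install-hammer-and-upgrade-to-jewel/hammer-to-jewel.yaml'],
    ['2-setup-cache-tiering/0-create-base-tier/create-replicated-pool.yaml',
     '2-setup-cache-tiering/0-create-base-tier/create-ec-pool.yaml'],
    ['2-setup-cache-tiering/1-create-cache-tier.yaml'],
    ['3-upgrade.yaml'],
]

# Cartesian product over the alternatives -> two jobs for this suite,
# one with a replicated base pool and one with an erasure-coded base pool.
for combo in itertools.product(*facets):
    job = {}
    for path in combo:
        with open(path) as f:
            merge(job, yaml.safe_load(f) or {})
    print('---')
    print(yaml.safe_dump(job, default_flow_style=False))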