Skip to content

Commit

Permalink
ceph-volume tests/functional update dmcrypt test playbooks to use --cluster
Browse files Browse the repository at this point in the history

Signed-off-by: Alfredo Deza <adeza@redhat.com>
  • Loading branch information
Alfredo Deza committed Sep 24, 2018
1 parent 5f72d43 commit 96e29ad
Show file tree
Hide file tree
Showing 4 changed files with 34 additions and 34 deletions.
Expand Up @@ -17,34 +17,34 @@
tasks:

- name: destroy osd.2
command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

- hosts: osds
become: yes
tasks:

# osd.2 device
- name: zap /dev/sdd1
command: "ceph-volume lvm zap /dev/sdd1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.2 using /dev/sdd1
command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1

# osd.0 lv
- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.0 using test_group/data-lv1
command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand All @@ -59,7 +59,7 @@
tasks:

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
Expand All @@ -68,12 +68,12 @@


- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: prepare osd.0 using test_group/data-lv1
command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand Down
Expand Up @@ -19,10 +19,10 @@
tasks:

- name: destroy osd.2
command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
Expand All @@ -31,33 +31,33 @@

# osd.2 device
- name: zap /dev/sdd1
command: "ceph-volume lvm zap /dev/sdd1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: zap /dev/sdd2
command: "ceph-volume lvm zap /dev/sdd2 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.2 using /dev/sdd1
command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1

# osd.0 lv
- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: zap /dev/sdc1
command: "ceph-volume lvm zap /dev/sdc1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: prepare osd.0 again using test_group/data-lv1
command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand Down
Expand Up @@ -17,10 +17,10 @@
tasks:

- name: destroy osd.2
command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
Expand All @@ -29,23 +29,23 @@

# osd.2 device
- name: zap /dev/sdd1
command: "ceph-volume lvm zap /dev/sdd1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.2 using /dev/sdd1
command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1

# osd.0 lv
- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.0 using test_group/data-lv1
command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand All @@ -60,20 +60,20 @@
tasks:

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
become: yes
tasks:

- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: prepare osd.0 using test_group/data-lv1
command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand Down
Expand Up @@ -19,10 +19,10 @@
tasks:

- name: destroy osd.2
command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

- name: destroy osd.0
command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
Expand All @@ -31,33 +31,33 @@

# osd.2 device
- name: zap /dev/sdd1
command: "ceph-volume lvm zap /dev/sdd1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: zap /dev/sdd2
command: "ceph-volume lvm zap /dev/sdd2 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: redeploy osd.2 using /dev/sdd1
command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1

# osd.0 lv
- name: zap test_group/data-lv1
command: "ceph-volume lvm zap test_group/data-lv1"
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1

- name: zap /dev/sdc1
command: "ceph-volume lvm zap /dev/sdc1 --destroy"
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1

- name: prepare osd.0 again using test_group/data-lv1
command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1

Expand Down

0 comments on commit 96e29ad

Please sign in to comment.