ceph-volume: adds support to zap encrypted devices #20537

Merged
merged 5 commits into from Feb 22, 2018
1 change: 1 addition & 0 deletions src/ceph-volume/ceph_volume/api/lvm.py
@@ -665,6 +665,7 @@ def __init__(self, **kw):
        self.lv_api = kw
        self.name = kw['lv_name']
        self.tags = parse_tags(kw['lv_tags'])
        self.encrypted = self.tags.get('ceph.encrypted', '0') == '1'

    def __str__(self):
        return '<%s>' % self.lv_api['lv_path']
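The new `encrypted` attribute simply reflects whether the `ceph.encrypted` LVM tag is set to `1` on the volume. As a rough illustration of how that flag surfaces, a simplified stand-in for the real `parse_tags` helper in `api/lvm.py` is sketched below, assuming `lv_tags` is a comma-separated `key=value` string:

# Simplified stand-in for ceph_volume.api.lvm.parse_tags (assumption: lv_tags
# is a comma-separated "key=value" string, e.g. as reported by `lvs -o lv_tags`).
def parse_tags(raw_tags):
    if not raw_tags:
        return {}
    return dict(tag.split('=', 1) for tag in raw_tags.split(',') if '=' in tag)

tags = parse_tags("ceph.osd_id=0,ceph.cluster_name=ceph,ceph.encrypted=1")
encrypted = tags.get('ceph.encrypted', '0') == '1'  # True for this example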
18 changes: 17 additions & 1 deletion src/ceph-volume/ceph_volume/devices/lvm/zap.py
@@ -5,7 +5,7 @@

from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
-from ceph_volume.util import system
+from ceph_volume.util import system, encryption, disk

logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
@@ -67,11 +67,27 @@ def zap(self, args):
            vg_name = pv.vg_name
            lv = api.get_lv(vg_name=vg_name)

        dmcrypt = False
        dmcrypt_uuid = None
        if lv:
            osd_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
            dmcrypt_uuid = lv.lv_uuid
            dmcrypt = lv.encrypted
            if system.path_is_mounted(osd_path):
                mlogger.info("Unmounting %s", osd_path)
                system.unmount(osd_path)
        else:
            # we're most likely dealing with a partition here, check to
            # see if it was encrypted
            partuuid = disk.get_partuuid(device)
            if encryption.status("/dev/mapper/{}".format(partuuid)):
                dmcrypt_uuid = partuuid
                dmcrypt = True

        if dmcrypt and dmcrypt_uuid:
            dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
            mlogger.info("Closing encrypted path %s", dmcrypt_path)
            encryption.dmcrypt_close(dmcrypt_path)

        if args.destroy and pv:
            logger.info("Found a physical volume created from %s, will destroy all its vgs and lvs", device)
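Taken together, the zap path now closes any open dm-crypt mapping before wiping the device. A condensed, standalone sketch of that decision flow follows; it mirrors the diff above but is illustrative only (the function name is hypothetical), and it assumes the `ceph_volume.util` helpers (`disk.get_partuuid`, `encryption.status`, `encryption.dmcrypt_close`) behave as they are used here:

# Illustrative sketch of the close-before-zap logic added in this PR; the
# function below is hypothetical and not part of the actual zap.py module.
from ceph_volume.util import disk, encryption


def close_dmcrypt_mapping(device, lv):
    """Close an open dm-crypt mapping for `device` (or its LV), if any."""
    dmcrypt_uuid = None
    if lv is not None and lv.encrypted:
        # Encrypted LV: the mapper name is the LV's UUID.
        dmcrypt_uuid = lv.lv_uuid
    elif lv is None:
        # Plain partition: the mapper name would be the partition UUID,
        # but only if an encrypted mapping is actually open.
        partuuid = disk.get_partuuid(device)
        if encryption.status("/dev/mapper/{}".format(partuuid)):
            dmcrypt_uuid = partuuid
    if dmcrypt_uuid:
        encryption.dmcrypt_close("/dev/mapper/{}".format(dmcrypt_uuid))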
@@ -0,0 +1,40 @@

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: destroy osd.2
      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"

    - name: zap /dev/sdd1
      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

    - name: destroy osd.0
      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"

    - name: zap test_group/data-lv1
      command: "ceph-volume lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1
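After a playbook like the one above runs, the functional job re-runs testinfra to confirm the cluster came back up. Purely as an illustration (this helper is hypothetical and not part of the PR), a check that a destroyed-and-recreated OSD is back up and in could look like this, assuming a reachable `ceph` CLI:

import json
import subprocess


def osd_is_up_and_in(osd_id):
    # `ceph osd dump --format json` reports per-OSD "up"/"in" flags as 0/1.
    out = subprocess.check_output(['ceph', 'osd', 'dump', '--format', 'json'])
    for osd in json.loads(out)['osds']:
        if osd['osd'] == osd_id:
            return osd['up'] == 1 and osd['in'] == 1
    return False


assert osd_is_up_and_in(2), "osd.2 did not rejoin the cluster after zap + create"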
@@ -0,0 +1,27 @@

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: destroy osd.2
      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"

    - name: zap /dev/sdd1
      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap /dev/sdd2
      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1
4 changes: 2 additions & 2 deletions src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -63,9 +63,9 @@ commands=
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

# destroy an OSD, zap its device and recreate it using its ID
-create: ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

# retest to ensure cluster came back up correctly
-create: testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests

vagrant destroy --force
@@ -0,0 +1,40 @@

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: destroy osd.2
      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"

    - name: zap /dev/sdd1
      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

    - name: destroy osd.0
      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"

    - name: zap test_group/data-lv1
      command: "ceph-volume lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1
@@ -0,0 +1,27 @@

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: destroy osd.2
      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"

    - name: zap /dev/sdd1
      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap /dev/sdd2
      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1