Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

mimic: ceph-volume: zap always skips block.db, leaves them around #30306

Merged
merged 2 commits into from Sep 12, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
39 changes: 33 additions & 6 deletions src/ceph-volume/ceph_volume/devices/lvm/zap.py
@@ -1,12 +1,13 @@
import argparse
import os
import logging
import time

from textwrap import dedent

from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
from ceph_volume.util import system, encryption, disk, arg_validators
from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int
from ceph_volume.util.device import Device
from ceph_volume.systemd import systemctl

Expand All @@ -17,12 +18,38 @@
def wipefs(path):
    """
    Removes the filesystem from an lv or partition.

    Retries on failure to work around a probable race condition where the
    device is still briefly held (e.g. by udev) right after being released.

    Environment variables supported::

    * ``CEPH_VOLUME_WIPEFS_TRIES``: Defaults to 8
    * ``CEPH_VOLUME_WIPEFS_INTERVAL``: Defaults to 5

    :param path: device, lv, or partition path to wipe
    :raises RuntimeError: if wipefs still fails after all retries
    """
    tries = str_to_int(
        os.environ.get('CEPH_VOLUME_WIPEFS_TRIES', 8)
    )
    interval = str_to_int(
        os.environ.get('CEPH_VOLUME_WIPEFS_INTERVAL', 5)
    )

    for attempt in range(tries):
        stdout, stderr, exit_code = process.call([
            'wipefs',
            '--all',
            path
        ])
        if exit_code == 0:
            return
        # this could narrow the retry by poking in the stderr of the output
        # to verify that 'probing initialization failed' appears, but
        # better to be broad in this retry to prevent missing on
        # a different message that needs to be retried as well
        terminal.warning(
            'failed to wipefs device, will try again to workaround probable race condition'
        )
        # only sleep if another attempt remains; sleeping right before
        # raising just delays the inevitable failure
        if attempt < tries - 1:
            time.sleep(interval)
    raise RuntimeError("could not complete wipefs on device: %s" % path)


def zap_data(path):
Expand Down
24 changes: 24 additions & 0 deletions src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -1,3 +1,4 @@
import os
import pytest
from ceph_volume.api import lvm as api
from ceph_volume.devices.lvm import zap
Expand Down Expand Up @@ -167,3 +168,26 @@ def test_multiple_backing_devs_are_found(self, volumes):
assert '/dev/VolGroup/lvjournal' in result
assert '/dev/VolGroup/lvwal' in result
assert '/dev/VolGroup/lvdb' in result


class TestWipeFs(object):
    """Tests for the retry behavior of ``zap.wipefs``."""

    def setup(self):
        # No sleeping between retries so tests stay fast, and drop any
        # TRIES value leaked by a previously-run test method so the
        # "default tries" test really exercises the default of 8
        os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
        os.environ.pop('CEPH_VOLUME_WIPEFS_TRIES', None)

    def test_works_on_second_try(self, stub_call):
        # first call fails, second succeeds: wipefs returns None quietly
        os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
        stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
        result = zap.wipefs('/dev/sda')
        assert result is None

    def test_does_not_work_after_several_tries(self, stub_call):
        # every attempt fails: the retry loop exhausts and raises
        os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
        stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
        with pytest.raises(RuntimeError):
            zap.wipefs('/dev/sda')

    def test_does_not_work_default_tries(self, stub_call):
        # with no TRIES override, the default of 8 attempts is used
        stub_call([('wiping /dev/sda', '', 1)]*8)
        with pytest.raises(RuntimeError):
            zap.wipefs('/dev/sda')