ceph-volume: Pacific backports #47413

Merged
merged 18 commits into from Aug 8, 2022
ceph-volume: fix fast device alloc size on multiple devices
The size computed by get_physical_fast_allocs() was wrong when the
function had multiple fast devices to handle.

For instance, with 4 OSDs and 2 fast devices of 10G each, allocating
2 slots per fast device: previously each slot would be 2.5G, leaving
both fast devices half full. Now each slot takes 5G, so the fast
devices are fully used. (A short sketch of this arithmetic follows the
commit metadata below.)

Fixes: https://tracker.ceph.com/issues/56031
Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@cern.ch>
(cherry picked from commit d0f9e93)
MrFreezeex authored and guits committed Aug 5, 2022
commit 4252cc44211f0ccebf388374744eaa26b32854d3
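To make the numbers in the commit message concrete, the following minimal sketch mirrors the per-slot size computation before and after the fix. It is illustrative only: the variable names are invented here, not ceph-volume identifiers, and the values come from the example above (2 fast devices of 10G, 2 slots per device).

# Illustrative sketch of the slot-size arithmetic from the commit message;
# the names below are not ceph-volume identifiers.
GiB = 1024 ** 3

fast_devices = 2         # fast (db/wal) devices backing 4 OSDs
dev_size = 10 * GiB      # usable VG size of each fast device
requested_slots = 2      # slots requested per fast device

# Old behaviour: the per-device slot count was multiplied by the number of
# devices grouped with the VG, so each 10G device was split into 4 slots.
old_slot_size = dev_size // (fast_devices * requested_slots)   # 2.5G -> device half used

# New behaviour: the device size is divided by the per-device slot count only.
new_slot_size = dev_size // requested_slots                    # 5G -> device fully used

print(old_slot_size, new_slot_size)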
6 changes: 1 addition & 5 deletions src/ceph-volume/ceph_volume/devices/lvm/batch.py
@@ -119,14 +119,10 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args):
                 continue
             # any LV present is considered a taken slot
             occupied_slots = len(dev.lvs)
-            # prior to v15.2.8, db/wal deployments were grouping multiple fast devices into single VGs - we need to
-            # multiply requested_slots (per device) by the number of devices in the VG in order to ensure that
-            # abs_size is calculated correctly from vg_size
-            slots_for_vg = len(vg_devices) * requested_slots
             dev_size = dev.vg_size[0]
             # this only looks at the first vg on device, unsure if there is a better
             # way
-            abs_size = disk.Size(b=int(dev_size / slots_for_vg))
+            abs_size = disk.Size(b=int(dev_size / requested_slots))
             free_size = dev.vg_free[0]
             relative_size = int(abs_size) / dev_size
             if requested_size:
25 changes: 10 additions & 15 deletions src/ceph-volume/ceph_volume/tests/conftest.py
@@ -58,9 +58,7 @@ def mock_lv():
         return dev
     return mock_lv
 
-
-@pytest.fixture
-def mock_devices_available():
+def mock_device():
     dev = create_autospec(device.Device)
     dev.path = '/dev/foo'
     dev.vg_name = 'vg_foo'
@@ -69,21 +67,18 @@ def mock_devices_available():
     dev.available_lvm = True
     dev.vg_size = [21474836480]
     dev.vg_free = dev.vg_size
-    return [dev]
+    dev.lvs = []
+    return dev
 
+@pytest.fixture(params=range(1,3))
+def mock_devices_available(request):
+    ret = []
+    for _ in range(request.param):
+        ret.append(mock_device())
+    return ret
 
 @pytest.fixture
 def mock_device_generator():
-    def mock_device():
-        dev = create_autospec(device.Device)
-        dev.path = '/dev/foo'
-        dev.vg_name = 'vg_foo'
-        dev.lv_name = 'lv_foo'
-        dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
-        dev.available_lvm = True
-        dev.vg_size = [21474836480]
-        dev.vg_free = dev.vg_size
-        dev.lvs = []
-        return dev
     return mock_device
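For context on the conftest.py change: @pytest.fixture(params=range(1,3)) makes pytest run every test that requests mock_devices_available once per value of params, so the fixture now yields a list with one mock device and then a list with two. A small standalone illustration of that pattern (using plain dicts instead of the autospec'd device.Device mocks) might look like this:

# Standalone illustration of a parametrized pytest fixture; a simplified
# stand-in for conftest.py's mock_device()/mock_devices_available pair.
import pytest

def make_fake_device():
    # Plain dict instead of create_autospec(device.Device); illustrative only.
    return {'path': '/dev/foo', 'vg_size': [21474836480], 'lvs': []}

@pytest.fixture(params=range(1, 3))
def fake_devices_available(request):
    # request.param is 1 on the first run and 2 on the second, so dependent
    # tests are exercised with both one and two fast devices.
    return [make_fake_device() for _ in range(request.param)]

def test_every_device_has_a_path(fake_devices_available):
    # pytest collects this test twice: once per fixture parameter.
    assert all(dev['path'] == '/dev/foo' for dev in fake_devices_available)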
10 changes: 10 additions & 0 deletions src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -216,6 +216,16 @@ def test_get_physical_fast_allocs_length(self, factory,
                                                  'block_db', 2, 2, args)
         assert len(fast) == 2
 
+    def test_get_physical_fast_allocs_abs_size(self, factory,
+                                               conf_ceph_stub,
+                                               mock_devices_available):
+        conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+        args = factory(block_db_slots=None, get_block_db_size=None)
+        fasts = batch.get_physical_fast_allocs(mock_devices_available,
+                                               'block_db', 2, 2, args)
+        for fast, dev in zip(fasts, mock_devices_available):
+            assert fast[2] == int(dev.vg_size[0] / 2)
+
     def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub,
                                                         mock_lv_device_generator):
         conf_ceph_stub('[global]\nfsid=asdf-lkjh')
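Reading the new test together with the batch.py change: the mocked devices expose a 21474836480-byte (20G) VG and the test requests 2 slots per device, so each returned allocation's absolute size (fast[2] in the assertion) must be half the VG, meaning the two slots together consume the whole device. A quick back-of-the-envelope check of that expectation, outside the test suite:

# Back-of-the-envelope check of the value asserted by the new test; this only
# mirrors its arithmetic and is not part of the ceph-volume test suite.
vg_size_bytes = 21474836480      # vg_size of the mocked devices (20G)
requested_slots = 2              # fast_slots_per_device passed in the test

expected_abs_size = int(vg_size_bytes / requested_slots)
assert expected_abs_size == 10737418240   # 10G per slot; two slots fill the device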