Commit 51d701b

Add ceph partitioning scheme support for fuel_agent
* Add the ability to set a GUID on partitions;
* Add bare-bones unit tests;
* Backport the ceph code from pmanager.py.

Partially implements: blueprint image-based-provisioning
Change-Id: Id23033a537b150875e452f81bd52d63f6286fa1d
Alexander Gordeev committed Sep 15, 2014
1 parent f56cde9 commit 51d701b
Showing 4 changed files with 104 additions and 2 deletions.
fuel_agent/drivers/nailgun.py: 48 additions, 0 deletions
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import math

from fuel_agent.drivers import ks_spaces_validator
from fuel_agent import errors
from fuel_agent import objects
@@ -102,11 +104,28 @@ def _getlabel(self, label):
        # disk label is > 12 characters.
        return ' -L {0} '.format(label[:12])

    def _get_partition_count(self, name):
        count = 0
        for disk in self.ks_disks:
            count += len([v for v in disk["volumes"]
                          if v.get('name') == name and v['size'] > 0])
        return count

    def _num_ceph_journals(self):
        return self._get_partition_count('cephjournal')

    def _num_ceph_osds(self):
        return self._get_partition_count('ceph')

    def partition_scheme(self):
        data = self.partition_data()
        ks_spaces_validator.validate(data)
        partition_scheme = objects.PartitionScheme()

        ceph_osds = self._num_ceph_osds()
        journals_left = ceph_osds
        ceph_journals = self._num_ceph_journals()

        for disk in self.ks_disks:
            parted = partition_scheme.add_parted(
                name=self._disk_dev(disk), label='gpt')
@@ -119,6 +138,35 @@ def partition_scheme(self):
                if volume['size'] <= 0:
                    continue

                if volume.get('name') == 'cephjournal':
                    # We need to allocate a journal partition for each
                    # ceph OSD. Determine the number of journal partitions
                    # we need on each device.
                    ratio = math.ceil(float(ceph_osds) / ceph_journals)

                    # No more than 10GB will be allocated to a single
                    # journal partition.
                    size = volume["size"] / ratio
                    if size > 10240:
                        size = 10240

                    # This will attempt to evenly spread partitions across
                    # multiple devices, e.g. 5 OSDs with 2 journal devices
                    # will create 3 partitions on the first device and 2 on
                    # the second.
                    if ratio < journals_left:
                        end = ratio
                    else:
                        end = journals_left

                    for i in range(0, end):
                        journals_left -= 1
                        if volume['type'] == 'partition':
                            prt = parted.add_partition(size=size)
                            if 'partition_guid' in volume:
                                prt.set_guid(volume['partition_guid'])
                    continue

                if volume['type'] in ('partition', 'pv', 'raid'):
                    prt = parted.add_partition(size=volume['size'])
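
The journal allocation above is easier to follow with concrete numbers. Below is a minimal, standalone sketch of the same arithmetic; the helper name spread_journals and the 20 GB device sizes are illustrative only, and the int() cast is added because math.ceil returns a float on Python 2:

import math


def spread_journals(ceph_osds, journal_sizes_mb):
    """Plan one journal partition per OSD across the journal devices.

    Returns a list of (partition_count, partition_size_mb) pairs,
    one per journal device, mirroring the loop in partition_scheme().
    """
    journals_left = ceph_osds
    # Journal partitions needed per device, rounded up
    # (int() because math.ceil returns a float on Python 2).
    ratio = int(math.ceil(float(ceph_osds) / len(journal_sizes_mb)))
    plan = []
    for size in journal_sizes_mb:
        # Cap a single journal partition at 10 GB (10240 MB).
        part_size = min(size // ratio, 10240)
        count = min(ratio, journals_left)
        journals_left -= count
        plan.append((count, part_size))
    return plan


# 5 OSDs over 2 journal devices: 3 partitions on the first device,
# 2 on the second, just as the comment in partition_scheme() says.
print(spread_journals(5, [20480, 20480]))  # [(3, 6826), (2, 6826)]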

fuel_agent/manager.py: 2 additions, 0 deletions
@@ -70,6 +70,8 @@ def do_partitioning(self):
                pu.make_partition(prt.device, prt.begin, prt.end, prt.type)
                for flag in prt.flags:
                    pu.set_partition_flag(prt.device, prt.count, flag)
                if prt.guid:
                    pu.set_gpt_type(prt.device, prt.count, prt.guid)

        # creating meta disks
        for md in self.partition_scheme.mds:
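
The pu.set_gpt_type helper that this hunk calls is not shown in the diff. A plausible sketch of it, assuming it wraps sgdisk --typecode (the usual way to set a GPT partition type GUID) via fuel_agent's utils.execute:

from fuel_agent.utils import utils


def set_gpt_type(dev, num, type_guid):
    """Set the GPT type GUID of partition `num` on device `dev`.

    sgdisk takes `partnum:guid`, e.g. `4:45b0969e-9b03-...` to mark
    a ceph journal partition.
    """
    utils.execute('sgdisk',
                  '--typecode=%s:%s' % (num, type_guid),
                  dev, check_exit_code=[0])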
fuel_agent/tests/test_manager.py: 7 additions, 2 deletions
@@ -52,13 +52,14 @@ def test_do_parsing(self, mock_lbd):
    @mock.patch.object(lu, 'vgcreate')
    @mock.patch.object(lu, 'pvcreate')
    @mock.patch.object(mu, 'mdcreate')
    @mock.patch.object(pu, 'set_gpt_type')
    @mock.patch.object(pu, 'set_partition_flag')
    @mock.patch.object(pu, 'make_partition')
    @mock.patch.object(pu, 'make_label')
    @mock.patch.object(hu, 'list_block_devices')
    def test_do_partitioning(self, mock_hu_lbd, mock_pu_ml, mock_pu_mp,
-                            mock_pu_spf, mock_mu_m, mock_lu_p, mock_lu_v,
-                            mock_lu_l, mock_fu_mf):
+                            mock_pu_spf, mock_pu_sgt, mock_mu_m, mock_lu_p,
+                            mock_lu_v, mock_lu_l, mock_fu_mf):
        mock_hu_lbd.return_value = test_nailgun.LIST_BLOCK_DEVICES_SAMPLE
        self.mgr.do_parsing()
        self.mgr.do_partitioning()
@@ -91,6 +92,10 @@ def test_do_partitioning(self, mock_hu_lbd, mock_pu_ml, mock_pu_mp,
        self.assertEqual(mock_pu_spf_expected_calls,
                         mock_pu_spf.call_args_list)

        mock_pu_sgt_expected_calls = [mock.call('/dev/sda', 4, 'fake_guid')]
        self.assertEqual(mock_pu_sgt_expected_calls,
                         mock_pu_sgt.call_args_list)

        mock_mu_m_expected_calls = [mock.call('/dev/md0', 'mirror',
                                              '/dev/sda3', '/dev/sdb3',
                                              '/dev/sdc3')]
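
The 'fake_guid' expectation ties the two halves of the change together: nailgun.py stores the GUID on the partition object and do_partitioning replays it through pu.set_gpt_type. The object side is not part of this diff; a minimal sketch of the relevant surface, assuming a plain attribute on the partition object (the real class lives in fuel_agent/objects/partition.py):

class Partition(object):
    """Only the GUID-related surface of the partition object."""

    def __init__(self, name, count, device, begin, end, partition_type):
        self.name = name
        self.count = count
        self.device = device
        self.begin = begin
        self.end = end
        self.type = partition_type
        self.flags = []
        self.guid = None  # None until set_guid() is called

    def set_guid(self, guid):
        # Only recorded here; applied to the disk later by
        # manager.do_partitioning() via pu.set_gpt_type().
        self.guid = guid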
fuel_agent/tests/test_nailgun.py: 47 additions, 0 deletions
@@ -20,6 +20,24 @@
from fuel_agent.utils import hardware_utils as hu


CEPH_JOURNAL = {
    "partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
    "name": "cephjournal",
    "mount": "none",
    "disk_label": "",
    "type": "partition",
    "file_system": "none",
    "size": 0
}
CEPH_DATA = {
    "partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
    "name": "ceph",
    "mount": "none",
    "disk_label": "",
    "type": "partition",
    "file_system": "none",
    "size": 3333
}
PROVISION_SAMPLE_DATA = {
    "profile": "ubuntu_1204_x86_64",
    "name_servers_search": "\"domain.tld\"",
@@ -485,3 +503,32 @@ def test_disk_dev_not_found(self, mock_lbd):
        }
        self.assertRaises(errors.DiskNotFoundError, self.drv._disk_dev,
                          fake_ks_disk)

    def test_get_partition_count(self):
        self.assertEqual(3, self.drv._get_partition_count('Boot'))
        self.assertEqual(1, self.drv._get_partition_count('TMP'))

    @mock.patch.object(hu, 'list_block_devices')
    def test_partition_scheme_ceph(self, mock_lbd):
        # TODO(agordeev): perform better testing of ceph logic
        p_data = PROVISION_SAMPLE_DATA.copy()
        for i in range(0, 3):
            p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
                CEPH_JOURNAL)
            p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
                CEPH_DATA)
        self.drv = nailgun.Nailgun(p_data)
        mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
        p_scheme = self.drv.partition_scheme()
        self.assertEqual(5, len(p_scheme.fss))
        self.assertEqual(4, len(p_scheme.pvs))
        self.assertEqual(3, len(p_scheme.lvs))
        self.assertEqual(2, len(p_scheme.vgs))
        self.assertEqual(1, len(p_scheme.mds))
        self.assertEqual(3, len(p_scheme.parteds))
        self.assertEqual(3, self.drv._get_partition_count('ceph'))
        # NOTE(agordeev): (-2, -1, -1) are the indexes of the ceph data
        # partitions on the (sda, sdb, sdc) disks respectively.
        for disk, part in enumerate((-2, -1, -1)):
            self.assertEqual(CEPH_DATA['partition_guid'],
                             p_scheme.parteds[disk].partitions[part].guid)
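
Two notes on the fixtures. First, the partition_guid values are the conventional ceph-disk GPT type codes: 45b0969e-9b03-4f30-b4c6-b4b80ceff106 marks a ceph journal and 4fbd7e29-9d25-41b8-afd0-062c0ceff05d a ceph OSD data partition, which is how ceph's disk tooling discovers them. Second, PROVISION_SAMPLE_DATA.copy() is a shallow copy, so the append calls above also mutate the nested 'volumes' lists of the shared module-level sample and could leak ceph volumes into other tests; a deep copy would isolate the fixture, e.g.:

import copy

# Deep-copy the sample so appends to the nested 'volumes' lists
# cannot leak back into the shared PROVISION_SAMPLE_DATA fixture.
p_data = copy.deepcopy(PROVISION_SAMPLE_DATA)
for space in p_data['ks_meta']['pm_data']['ks_spaces'][:3]:
    space['volumes'].append(dict(CEPH_JOURNAL))
    space['volumes'].append(dict(CEPH_DATA))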
