Update amulet test definitions
This change requires the following charm-helpers change
to land first:

 - juju/charm-helpers#32

Change-Id: Iae88b2c11fe9ddcc176075f54a8c075d2dc3ba4c
Ryan Beisner authored and thedac committed Dec 6, 2017
1 parent 0d8bdc3 commit 4e70770
Showing 12 changed files with 98 additions and 44 deletions.
9 changes: 6 additions & 3 deletions hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']

self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
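
Note (not part of the commit): a minimal sketch of the new lookup, assuming the JSON shape returned by `ceph df --format=json`. The point of the change is that pools are matched on their 'id' field rather than treated as a list indexed by pool_id, which matters once pool IDs are not contiguous (for example after a pool has been deleted). The df value and names below are illustrative only:

# Illustrative data; pool IDs need not match list positions.
df = {'pools': [
    {'id': 0, 'name': 'rbd', 'stats': {'objects': 10, 'kb_used': 4096}},
    {'id': 2, 'name': 'cinder-ceph', 'stats': {'objects': 5, 'kb_used': 2048}},
]}

pool_id = 2
sample = None
for pool in df['pools']:
    if pool['id'] == pool_id:
        sample = [pool['name'], pool['stats']['objects'], pool['stats']['kb_used']]

# Indexing by position, df['pools'][pool_id], would raise IndexError here;
# matching on the 'id' field finds the right pool.
print(sample)  # ['cinder-ceph', 5, 2048]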
2 changes: 2 additions & 0 deletions hooks/charmhelpers/contrib/openstack/utils.py
@@ -392,6 +392,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
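
Note (not part of the commit): a minimal sketch of why the decode is needed. On Python 3, subprocess.check_output() returns bytes, and a str-in-bytes membership test raises TypeError, so the output must be decoded before checking for the codename. The echo command below merely stands in for `apt-cache policy swift`:

import subprocess

# On Python 3, check_output returns bytes unless text mode is requested,
# and `'pike' in ret` against bytes raises TypeError.
ret = subprocess.check_output(['echo', 'swift 2.15.1 pike'])
print(type(ret))           # <class 'bytes'>

ret = ret.decode('UTF-8')  # now a str, safe for substring checks
print('pike' in ret)       # True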
8 changes: 4 additions & 4 deletions hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -377,12 +377,12 @@ def get_mon_map(service):
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
log("Unable to parse mon_status json: {}. Error: {}"
.format(mon_status, str(v)))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
log("mon_status command failed with message: {}"
.format(str(e)))
raise


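Note (not part of the commit): a short sketch of why str(v) replaces v.message. Python 3 removed the .message attribute from exceptions, so formatting str(e) is the portable way to log the error text. The error string below is illustrative only:

# Python 3 exceptions have no `.message` attribute; str(e) works on 2 and 3.
try:
    raise ValueError("Expecting value: line 1 column 1 (char 0)")
except ValueError as v:
    # v.message  -> AttributeError on Python 3
    print("Unable to parse mon_status json. Error: {}".format(str(v)))
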
2 changes: 2 additions & 0 deletions hooks/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
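Note (not part of the commit): because write_file opens its target in binary mode, str content has to be encoded before writing on Python 3. A minimal illustration, not the charm-helpers function itself; demo_write and the paths are hypothetical, and the six module is assumed to be available:

import six

def demo_write(path, content):
    # The handle is binary ('wb'), so str content must become bytes first.
    with open(path, 'wb') as target:
        if six.PY3 and isinstance(content, six.string_types):
            content = content.encode('UTF-8')
        target.write(content)

demo_write('/tmp/demo.txt', 'hello')       # str is encoded on Python 3
demo_write('/tmp/demo.bin', b'\x00\x01')   # bytes pass through unchanged
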
1 change: 1 addition & 0 deletions hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@
'yakkety',
'zesty',
'artful',
'bionic',
)


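Note (not part of the commit): the codename tuple is ordered oldest-to-newest, and charm-helpers uses that ordering for release comparisons, so 'bionic' must be present before it can be compared at all. A hand-rolled illustration; is_newer and the truncated tuple below are examples only, not the charm-helpers comparator:

# Illustrative only: an ordered codename tuple supports comparisons by index.
UBUNTU_RELEASES = ('yakkety', 'zesty', 'artful', 'bionic')

def is_newer(release, than):
    return UBUNTU_RELEASES.index(release) > UBUNTU_RELEASES.index(than)

print(is_newer('bionic', 'artful'))   # True
print(is_newer('zesty', 'artful'))    # False
# Without the new entry, is_newer('bionic', 'artful') would raise ValueError.
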
103 changes: 70 additions & 33 deletions tests/basic_deployment.py
@@ -64,7 +64,7 @@ def _add_services(self):
# Note: cinder-backup becomes a cinder subordinate unit.
this_service = {'name': 'cinder-backup'}
other_services = [
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
{'name': 'percona-cluster'},
{'name': 'keystone'},
{'name': 'rabbitmq-server'},
{'name': 'ceph', 'units': 3},
@@ -96,10 +96,8 @@ def _configure_services(self):
'admin-token': 'ubuntutesting'
}
pxc_config = {
'dataset-size': '25%',
'innodb-buffer-pool-size': '256M',
'max-connections': 1000,
'root-password': 'ChangeMe123',
'sst-password': 'ChangeMe123',
}
cinder_config = {
'block-device': 'None',
Expand Down Expand Up @@ -239,18 +237,32 @@ def test_102_services(self):
def test_110_users(self):
"""Verify expected users."""
u.log.debug('Checking keystone users...')
expected = [
{'name': 'cinder_cinderv2',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'},
{'name': 'admin',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'}
]

if self._get_openstack_release() < self.xenial_pike:
expected = [{
'name': 'cinder_cinderv2',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost',
}]
else:
expected = [{
'name': 'cinderv3_cinderv2',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost',
}]

expected.append({
'name': 'admin',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost',
})

actual = self.keystone.users.list()
ret = u.validate_user_data(expected, actual)
if ret:
@@ -275,10 +287,15 @@ def test_112_service_catalog(self):
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null

expected = {
'identity': [endpoint_id],
'volume': [endpoint_id]
}
if self._get_openstack_release() >= self.xenial_pike:
# Pike and later
expected = {'identity': [endpoint_id],
'volumev2': [endpoint_id]}
else:
# Ocata and prior
expected = {'identity': [endpoint_id],
'volume': [endpoint_id]}

actual = self.keystone.service_catalog.get_endpoints()

ret = u.validate_svc_catalog_endpoint_data(expected, actual)
@@ -478,6 +495,8 @@ def test_206_keystone_cinder_id_relation(self):
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
if self._get_openstack_release() >= self.xenial_pike:
expected['service_username'] = 'cinderv3_cinderv2'
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('identity-service cinder', ret)
@@ -489,14 +508,29 @@ def test_207_cinder_keystone_id_relation(self):
unit = self.cinder_sentry
relation = ['identity-service',
'keystone:identity-service']
expected = {
'cinder_service': 'cinder',
'cinder_region': 'RegionOne',
'cinder_public_url': u.valid_url,
'cinder_internal_url': u.valid_url,
'cinder_admin_url': u.valid_url,
'private-address': u.valid_ip
}
if self._get_openstack_release() < self.xenial_pike:
expected = {
'cinder_service': 'cinder',
'cinder_region': 'RegionOne',
'cinder_public_url': u.valid_url,
'cinder_internal_url': u.valid_url,
'cinder_admin_url': u.valid_url,
'private-address': u.valid_ip
}
else:
expected = {
'cinderv2_service': 'cinderv2',
'cinderv2_region': 'RegionOne',
'cinderv2_public_url': u.valid_url,
'cinderv2_internal_url': u.valid_url,
'cinderv2_admin_url': u.valid_url,
'cinderv3_service': 'cinderv3',
'cinderv3_region': 'RegionOne',
'cinderv3_public_url': u.valid_url,
'cinderv3_internal_url': u.valid_url,
'cinderv3_admin_url': u.valid_url,
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder identity-service', ret)
@@ -816,11 +850,14 @@ def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)

# Validate ceph cinder pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"cinder pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Luminous (pike) ceph seems more efficient at disk usage so we cannot
# guarantee the ordering of kb_used
if self._get_openstack_release() < self.xenial_mitaka:
# Validate ceph cinder pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"cinder pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)

def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
9 changes: 6 additions & 3 deletions tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']

self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
2 changes: 2 additions & 0 deletions tests/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
1 change: 1 addition & 0 deletions tests/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@
'yakkety',
'zesty',
'artful',
'bionic',
)


3 changes: 3 additions & 0 deletions tests/gate-basic-artful-pike
@@ -21,3 +21,6 @@ from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='artful')
deployment.run_tests()

# NOTE(beisner): Artful target disabled, pending bug:
# https://bugs.launchpad.net/charm-percona-cluster/+bug/1728132
tests/gate-basic-xenial-pike: file mode changed 100644 → 100755 (no content changes)
2 changes: 1 addition & 1 deletion tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy

[testenv:func27-dfs]
# Charm Functional Test
