Merge pull request #26702 from ceph/backport-mimic-26685
mimic: ceph-volume: use our own testinfra suite for functional testing

Reviewed-by: Alfredo Deza <adeza@redhat.com>
alfredodeza committed Mar 19, 2019
2 parents 2b3c641 + 617c3e1 commit 6583c77
Showing 7 changed files with 174 additions and 11 deletions.
8 changes: 4 additions & 4 deletions src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -48,20 +48,20 @@ commands=
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

# test cluster state using ceph-ansible tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
# test cluster state using testinfra
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# reboot all vms - attempt
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

# retest to ensure cluster came back up correctly after rebooting
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# destroy an OSD, zap its device and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

# retest to ensure cluster came back up correctly
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# test zap OSDs by ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
8 changes: 4 additions & 4 deletions src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -56,19 +56,19 @@ commands=
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

# test cluster state using ceph-ansible tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
# test cluster state using testinfra
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# reboot all vms - attempt
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

# retest to ensure cluster came back up correctly after rebooting
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# destroy an OSD, zap its device and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

# retest to ensure cluster came back up correctly
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
6 changes: 3 additions & 3 deletions src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -46,8 +46,8 @@ commands=
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

# test cluster state using ceph-ansible tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
# test cluster state using testinfra
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

# make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -59,6 +59,6 @@ commands=
sleep 120

# retest to ensure cluster came back up correctly after rebooting
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests

vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
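
The tox.ini hunks above all make the same change: testinfra now collects the suite bundled with ceph-volume at {toxinidir}/../tests instead of the tests shipped in the ceph-ansible checkout under {envdir}/tmp. As a rough, hypothetical sketch only (the scenario inventory path below is made up, and it assumes testinfra, pytest-xdist and ansible are installed in the tox virtualenv), the same invocation could be driven from Python, since the testinfra command is essentially pytest with the testinfra plugin options:

import pytest

# Hypothetical local run of one scenario with the tox substitutions resolved
# by hand: {changedir} becomes the scenario directory that holds the "hosts"
# inventory, and {toxinidir}/../tests becomes the suite added by this PR.
args = [
    "-n", "4",                # pytest-xdist: run with 4 workers
    "--sudo",                 # testinfra: elevate on the remote hosts
    "-v",
    "--connection=ansible",
    "--ansible-inventory=./centos7/bluestore/single_type/hosts",  # hypothetical scenario
    "../tests",               # the ceph-volume functional testinfra suite
]
pytest.main(args)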
Empty file.
103 changes: 103 additions & 0 deletions src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
@@ -0,0 +1,103 @@
import pytest
import os


@pytest.fixture()
def node(host, request):
""" This fixture represents a single node in the ceph cluster. Using the
host.ansible fixture provided by testinfra it can access all the ansible
variables provided to it by the specific test scenario being ran.
You must include this fixture on any tests that operate on specific type
of node because it contains the logic to manage which tests a node
should run.
"""
ansible_vars = host.ansible.get_variables()
# tox/jenkins/user will pass in this environment variable. we need to do it this way
# because testinfra does not collect and provide ansible config passed in
# from using --extra-vars
ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
group_names = ansible_vars["group_names"]
num_osd_ports = 4
if ceph_dev_branch in ['luminous', 'mimic']:
num_osd_ports = 2

# capture the initial/default state
test_is_applicable = False
for marker in request.node.iter_markers():
if marker.name in group_names or marker.name == 'all':
test_is_applicable = True
break
# Check if any markers on the test method exist in the nodes group_names.
# If they do not, this test is not valid for the node being tested.
if not test_is_applicable:
reason = "%s: Not a valid test for node type: %s" % (
request.function, group_names)
pytest.skip(reason)

    osd_ids = []
    osds = []
    cluster_address = ""
    # I can assume eth1 because I know all the vagrant
    # boxes we test with use that interface
    address = host.interface("eth1").addresses[0]
    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
    num_mons = len(ansible_vars["groups"]["mons"])
    num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    osds_per_device = ansible_vars.get("osds_per_device", 1)
    num_osds = num_osds * osds_per_device

    # If number of devices doesn't map to number of OSDs, allow tests to define
    # that custom number, defaulting it to ``num_devices``
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
    if "osds" in group_names:
        # I can assume eth2 because I know all the vagrant
        # boxes we test with use that interface. OSDs are the only
        # nodes that have this interface.
        cluster_address = host.interface("eth2").addresses[0]
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids

    data = dict(
        address=address,
        subnet=subnet,
        vars=ansible_vars,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        num_osd_ports=num_osd_ports,
        cluster_name=cluster_name,
        conf_path=conf_path,
        cluster_address=cluster_address,
        osds=osds,
    )
    return data


def pytest_collection_modifyitems(session, config, items):
    for item in items:
        test_path = item.location[0]
        if "mon" in test_path:
            item.add_marker(pytest.mark.mons)
        elif "osd" in test_path:
            item.add_marker(pytest.mark.osds)
        elif "mds" in test_path:
            item.add_marker(pytest.mark.mdss)
        elif "mgr" in test_path:
            item.add_marker(pytest.mark.mgrs)
        elif "rbd-mirror" in test_path:
            item.add_marker(pytest.mark.rbdmirrors)
        elif "rgw" in test_path:
            item.add_marker(pytest.mark.rgws)
        elif "nfs" in test_path:
            item.add_marker(pytest.mark.nfss)
        elif "iscsi" in test_path:
            item.add_marker(pytest.mark.iscsigws)
        else:
            item.add_marker(pytest.mark.all)
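
To illustrate how the node fixture and the path-based markers above fit together, here is a minimal sketch, not part of this commit, of a test that could live under the new tests directory. Because its (hypothetical) path would contain "osd", pytest_collection_modifyitems tags it with the osds marker, and the node fixture skips it on any host that is not in the osds group:

# hypothetical file: tests/osd/test_example.py
class TestExample(object):

    def test_conf_file_exists(self, node, host):
        # conf_path is assembled by the node fixture as /etc/ceph/<cluster>.conf
        assert host.file(node["conf_path"]).exists

    def test_expected_number_of_osd_ids(self, node, host):
        # osd_ids is read from /var/lib/ceph/osd/ on OSD hosts only, so in a
        # healthy deployment it should match the computed num_osds
        assert len(node["osd_ids"]) == node["num_osds"]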
Empty file.
@@ -0,0 +1,60 @@
import json


class TestOSDs(object):

    def test_ceph_osd_package_is_installed(self, node, host):
        assert host.package("ceph-osd").is_installed

    def test_osds_listen_on_public_network(self, node, host):
        # TODO: figure out a way to parametrize this test
        nb_port = (node["num_osds"] * node["num_osd_ports"])
        assert host.check_output(
            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)  # noqa E501

    def test_osds_listen_on_cluster_network(self, node, host):
        # TODO: figure out a way to parametrize this test
        nb_port = (node["num_osds"] * node["num_osd_ports"])
        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" %  # noqa E501
                                 (node["cluster_address"])) == str(nb_port)

    def test_osd_services_are_running(self, node, host):
        # TODO: figure out a way to parametrize node['osds'] for this test
        for osd in node["osds"]:
            assert host.service("ceph-osd@%s" % osd).is_running

    def test_osd_are_mounted(self, node, host):
        # TODO: figure out a way to parametrize node['osd_ids'] for this test
        for osd_id in node["osd_ids"]:
            osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
                cluster=node["cluster_name"],
                osd_id=osd_id,
            )
            assert host.mount_point(osd_path).exists

    def test_ceph_volume_is_installed(self, node, host):
        host.exists('ceph-volume')

    def test_ceph_volume_systemd_is_installed(self, node, host):
        host.exists('ceph-volume-systemd')

    def _get_osd_id_from_host(self, node, osd_tree):
        children = []
        for n in osd_tree['nodes']:
            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':  # noqa E501
                children = n['children']
        return children

    def _get_nb_up_osds_from_ids(self, node, osd_tree):
        nb_up = 0
        ids = self._get_osd_id_from_host(node, osd_tree)
        for n in osd_tree['nodes']:
            if n['id'] in ids and n['status'] == 'up':
                nb_up += 1
        return nb_up

    def test_all_osds_are_up_and_in(self, node, host):
        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
            cluster=node["cluster_name"])
        output = json.loads(host.check_output(cmd))
        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
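
The two helper methods above walk the JSON emitted by ceph osd tree -f json. A small self-contained sketch of the same logic, run against a trimmed and hypothetical output for one host with two OSDs that are both up, shows what the assertion in test_all_osds_are_up_and_in ends up comparing:

import json

# trimmed, hypothetical `ceph osd tree -f json` output for host "node1"
sample = json.loads("""
{
  "nodes": [
    {"id": -2, "name": "node1", "type": "host", "children": [1, 0]},
    {"id": 0, "name": "osd.0", "type": "osd", "status": "up"},
    {"id": 1, "name": "osd.1", "type": "osd", "status": "up"}
  ]
}
""")


def count_up_osds(hostname, osd_tree):
    # same walk as _get_osd_id_from_host + _get_nb_up_osds_from_ids above
    children = []
    for n in osd_tree["nodes"]:
        if n["name"] == hostname and n["type"] == "host":
            children = n["children"]
    return sum(1 for n in osd_tree["nodes"]
               if n["id"] in children and n.get("status") == "up")


assert count_up_osds("node1", sample) == 2  # matches num_osds for this node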
