Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #26702 from ceph/backport-mimic-26685
mimic: ceph-volume: use our own testinfra suite for functional testing Reviewed-by: Alfredo Deza <adeza@redhat.com>
- Loading branch information
Showing
7 changed files
with
174 additions
and
11 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
103 changes: 103 additions & 0 deletions
103
src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,103 @@ | ||
import pytest | ||
import os | ||
|
||
|
||
@pytest.fixture()
def node(host, request):
    """Represent a single node in the ceph cluster under test.

    Reads the ansible variables for the node through testinfra's
    ``host.ansible`` fixture and returns them, plus derived values
    (addresses, OSD ids, expected counts), as a plain dict. Include this
    fixture on any test that targets a specific node type: it skips the
    test when none of the test's markers match the node's groups.
    """
    ansible_vars = host.ansible.get_variables()
    # tox/jenkins/user will pass in this environment variable. we need to
    # do it this way because testinfra does not collect and provide ansible
    # config passed in from using --extra-vars
    ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
    group_names = ansible_vars["group_names"]
    num_osd_ports = 2 if ceph_dev_branch in ['luminous', 'mimic'] else 4

    # Check if any markers on the test method exist in the nodes
    # group_names. If they do not, this test is not valid for the node
    # being tested.
    test_is_applicable = any(
        marker.name in group_names or marker.name == 'all'
        for marker in request.node.iter_markers()
    )
    if not test_is_applicable:
        reason = "%s: Not a valid test for node type: %s" % (
            request.function, group_names)
        pytest.skip(reason)

    # I can assume eth1 because I know all the vagrant
    # boxes we test with use that interface
    address = host.interface("eth1").addresses[0]
    subnet = ".".join(ansible_vars["public_network"].split(".")[:-1])
    num_mons = len(ansible_vars["groups"]["mons"])
    num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    num_osds = num_osds * ansible_vars.get("osds_per_device", 1)

    # If number of devices doesn't map to number of OSDs, allow tests to
    # define that custom number, defaulting it to ``num_devices``
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)

    osd_ids = []
    osds = []
    cluster_address = ""
    if "osds" in group_names:
        # I can assume eth2 because I know all the vagrant
        # boxes we test with use that interface. OSDs are the only
        # nodes that have this interface.
        cluster_address = host.interface("eth2").addresses[0]
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids

    return dict(
        address=address,
        subnet=subnet,
        vars=ansible_vars,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        num_osd_ports=num_osd_ports,
        cluster_name=cluster_name,
        conf_path=conf_path,
        cluster_address=cluster_address,
        osds=osds,
    )
|
||
|
||
def pytest_collection_modifyitems(session, config, items):
    """Tag every collected test with a marker derived from its file path.

    The first matching path fragment wins (e.g. a path containing both
    "mon" and "osd" is marked ``mons``); tests whose path matches no
    known daemon name receive the catch-all ``all`` marker.
    """
    path_markers = [
        ("mon", pytest.mark.mons),
        ("osd", pytest.mark.osds),
        ("mds", pytest.mark.mdss),
        ("mgr", pytest.mark.mgrs),
        ("rbd-mirror", pytest.mark.rbdmirrors),
        ("rgw", pytest.mark.rgws),
        ("nfs", pytest.mark.nfss),
        ("iscsi", pytest.mark.iscsigws),
    ]
    for item in items:
        test_path = item.location[0]
        for fragment, marker in path_markers:
            if fragment in test_path:
                item.add_marker(marker)
                break
        else:
            # no daemon-specific fragment matched
            item.add_marker(pytest.mark.all)
Empty file.
60 changes: 60 additions & 0 deletions
60
src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,60 @@ | ||
import json | ||
|
||
|
||
class TestOSDs(object): | ||
|
||
def test_ceph_osd_package_is_installed(self, node, host): | ||
assert host.package("ceph-osd").is_installed | ||
|
||
def test_osds_listen_on_public_network(self, node, host): | ||
# TODO: figure out way to paramaterize this test | ||
nb_port = (node["num_osds"] * node["num_osd_ports"]) | ||
assert host.check_output( | ||
"netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port) # noqa E501 | ||
|
||
def test_osds_listen_on_cluster_network(self, node, host): | ||
# TODO: figure out way to paramaterize this test | ||
nb_port = (node["num_osds"] * node["num_osd_ports"]) | ||
assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501 | ||
(node["cluster_address"])) == str(nb_port) | ||
|
||
def test_osd_services_are_running(self, node, host): | ||
# TODO: figure out way to paramaterize node['osds'] for this test | ||
for osd in node["osds"]: | ||
assert host.service("ceph-osd@%s" % osd).is_running | ||
|
||
def test_osd_are_mounted(self, node, host): | ||
# TODO: figure out way to paramaterize node['osd_ids'] for this test | ||
for osd_id in node["osd_ids"]: | ||
osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format( | ||
cluster=node["cluster_name"], | ||
osd_id=osd_id, | ||
) | ||
assert host.mount_point(osd_path).exists | ||
|
||
def test_ceph_volume_is_installed(self, node, host): | ||
host.exists('ceph-volume') | ||
|
||
def test_ceph_volume_systemd_is_installed(self, node, host): | ||
host.exists('ceph-volume-systemd') | ||
|
||
def _get_osd_id_from_host(self, node, osd_tree): | ||
children = [] | ||
for n in osd_tree['nodes']: | ||
if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501 | ||
children = n['children'] | ||
return children | ||
|
||
def _get_nb_up_osds_from_ids(self, node, osd_tree): | ||
nb_up = 0 | ||
ids = self._get_osd_id_from_host(node, osd_tree) | ||
for n in osd_tree['nodes']: | ||
if n['id'] in ids and n['status'] == 'up': | ||
nb_up += 1 | ||
return nb_up | ||
|
||
def test_all_osds_are_up_and_in(self, node, host): | ||
cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501 | ||
cluster=node["cluster_name"]) | ||
output = json.loads(host.check_output(cmd)) | ||
assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) |