diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2b59d10a010..0aa97dd4f31 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,17 +1,19 @@ ## Proposed Commit Message -> summary: no more than 70 characters -> -> A description of what the change being made is and why it is being -> made, if the summary line is insufficient. The blank line above is -> required. This should be wrapped at 72 characters, but otherwise has -> no particular length requirements. -> -> If you need to write multiple paragraphs, feel free. -> -> LP: #NNNNNNN (replace with the appropriate bug reference or remove -> this line entirely if there is no associated bug) +``` +summary: no more than 70 characters + +A description of what the change being made is and why it is being +made, if the summary line is insufficient. The blank line above is +required. This should be wrapped at 72 characters, but otherwise has +no particular length requirements. + +If you need to write multiple paragraphs, feel free. + +LP: #NNNNNNN (replace with the appropriate bug reference or remove +this line entirely if there is no associated bug) +``` ## Additional Context diff --git a/.travis.yml b/.travis.yml index 2fad49f37eb..8ead3468423 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,16 +33,16 @@ install: script: - tox +env: + TOXENV=py3 + PYTEST_ADDOPTS=-v # List all tests run by pytest + matrix: fast_finish: true - allow_failures: - - name: "Integration Tests (WIP)" include: - python: 3.6 - env: - TOXENV=py3 - PYTEST_ADDOPTS=-v # List all tests run by pytest - if: NOT branch =~ /^ubuntu\// + env: {} cache: - directories: - lxd_images @@ -121,8 +121,9 @@ matrix: - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' # Ubuntu LTS: Integration - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb' - - name: "Integration Tests (WIP)" + - name: "Integration Tests" if: NOT branch =~ /^ubuntu\// + env: {} cache: - directories: - lxd_images @@ -220,3 +221,9 @@ matrix: env: TOXENV=pylint - python: 3.6 env: TOXENV=doc + # Test all supported Python versions (but at the end, so we schedule + # longer-running jobs first) + - python: 3.9 + - python: 3.8 + - python: 3.7 + - python: 3.5 diff --git a/HACKING.rst b/HACKING.rst index 8a12e3e3ec9..6ce4397d8f3 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -173,154 +173,11 @@ Cloud Config Modules * Any new modules should use underscores in any new config options and not hyphens (e.g. `new_option` and *not* `new-option`). -.. _unit_testing: - -Testing ------------- - -cloud-init has both unit tests and integration tests. Unit tests can -be found in-tree alongside the source code, as well as -at ``tests/unittests``. Integration tests can be found at -``tests/integration_tests``. Documentation specifically for integration -tests can be found on the :ref:`integration_tests` page, but -the guidelines specified below apply to both types of tests. - -cloud-init uses `pytest`_ to run its tests, and has tests written both -as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests. 
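The removed paragraph above (re-added later in this diff as ``doc/rtd/topics/testing.rst``) distinguishes ``unittest.TestCase`` sub-classes from un-subclassed pytest tests. As a minimal illustrative sketch, hypothetical code not taken from the cloud-init tree, the two styles look like this:

```python
# Hypothetical examples of the two test styles described above; the class
# and function names are illustrative only.
from unittest import TestCase

import pytest


class TestLegacyStyle(TestCase):
    """unittest-style: subclasses TestCase and uses self.assert* helpers."""

    def test_addition(self):
        self.assertEqual(4, 2 + 2)


class TestPytestStyle:
    """pytest-style: no inheritance, bare asserts, fixtures passed as args."""

    @pytest.mark.parametrize("value,expected", [(2, 4), (3, 9)])
    def test_square(self, value, expected):
        # Expected value placed before the value under test, per the
        # guideline given in this section.
        assert expected == value ** 2
```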
-The following guidelines should be followed: - -* For ease of organisation and greater accessibility for developers not - familiar with pytest, all cloud-init unit tests must be contained - within test classes - - * Put another way, module-level test functions should not be used - -* pytest test classes should use `pytest fixtures`_ to share - functionality instead of inheritance - -* As all tests are contained within classes, it is acceptable to mix - ``TestCase`` test classes and pytest test classes within the same - test file - - * These can be easily distinguished by their definition: pytest - classes will not use inheritance at all (e.g. - `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will - subclass (indirectly) from ``TestCase`` (e.g. - `TestPrependBaseCommands`_) - -* pytest tests should use bare ``assert`` statements, to take advantage - of pytest's `assertion introspection`_ - - * For ``==`` and other commutative assertions, the expected value - should be placed before the value under test: - ``assert expected_value == function_under_test()`` - -* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use - pytest features that are available in v2.8.7. This is an - inexhaustive list of ways in which this may catch you out: - - * Support for using ``yield`` in ``pytest.fixture`` functions was - only introduced in `pytest 3.0`_. Such functions must instead use - the ``pytest.yield_fixture`` decorator. - - * Only the following built-in fixtures are available - [#fixture-list]_: - - * ``cache`` - * ``capfd`` - * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial) - * ``capsys`` - * ``monkeypatch`` - * ``pytestconfig`` - * ``record_xml_property`` - * ``recwarn`` - * ``tmpdir_factory`` - * ``tmpdir`` - - * On xenial, the objects returned by the ``tmpdir`` fixture cannot be - used where paths are required; they are rejected as invalid paths. - You must instead use their ``.strpath`` attribute. - - * For example, instead of - ``util.write_file(tmpdir.join("some_file"), ...)``, you should - write ``util.write_file(tmpdir.join("some_file").strpath, ...)``. - - * The `pytest.param`_ function cannot be used. It was introduced in - pytest 3.1, which means it is not available on xenial. The more - limited mechanism it replaced was removed in pytest 4.0, so is not - available in focal or later. The only available alternatives are - to write mark-requiring test instances as completely separate - tests, without utilising parameterisation, or to apply the mark to - the entire parameterized test (and therefore every test instance). - -* Variables/parameter names for ``Mock`` or ``MagicMock`` instances - should start with ``m_`` to clearly distinguish them from non-mock - variables - - * For example, ``m_readurl`` (which would be a mock for ``readurl``) - -* The ``assert_*`` methods that are available on ``Mock`` and - ``MagicMock`` objects should be avoided, as typos in these method - names may not raise ``AttributeError`` (and so can cause tests to - silently pass). An important exception: if a ``Mock`` is - `autospecced`_ then misspelled assertion methods *will* raise an - ``AttributeError``, so these assertion methods may be used on - autospecced ``Mock`` objects. 
- - For non-autospecced ``Mock`` s, these substitutions can be used - (``m`` is assumed to be a ``Mock``): - - * ``m.assert_any_call(*args, **kwargs)`` => ``assert - mock.call(*args, **kwargs) in m.call_args_list`` - * ``m.assert_called()`` => ``assert 0 != m.call_count`` - * ``m.assert_called_once()`` => ``assert 1 == m.call_count`` - * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert - [mock.call(*args, **kwargs)] == m.call_args_list`` - * ``m.assert_called_with(*args, **kwargs)`` => ``assert - mock.call(*args, **kwargs) == m.call_args_list[-1]`` - * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in - call_list: assert call in m.call_args_list`` - - * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(..., - any_order=False)`` are not easily replicated in a single - statement, so their use when appropriate is acceptable. - - * ``m.assert_not_called()`` => ``assert 0 == m.call_count`` - -* Test arguments should be ordered as follows: - - * ``mock.patch`` arguments. When used as a decorator, ``mock.patch`` - partially applies its generated ``Mock`` object as the first - argument, so these arguments must go first. - * ``pytest.mark.parametrize`` arguments, in the order specified to - the ``parametrize`` decorator. These arguments are also provided - by a decorator, so it's natural that they sit next to the - ``mock.patch`` arguments. - * Fixture arguments, alphabetically. These are not provided by a - decorator, so they are last, and their order has no defined - meaning, so we default to alphabetical. - -* It follows from this ordering of test arguments (so that we retain - the property that arguments left-to-right correspond to decorators - bottom-to-top) that test decorators should be ordered as follows: - - * ``pytest.mark.parametrize`` - * ``mock.patch`` - -* When there are multiple patch calls in a test file for the module it - is testing, it may be desirable to capture the shared string prefix - for these patch calls in a module-level variable. If used, such - variables should be named ``M_PATH`` or, for datasource tests, - ``DS_PATH``. - -.. _pytest: https://docs.pytest.org/ -.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html -.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15 -.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9 -.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html -.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 -.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param -.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing +Tests +----- + +Submissions to cloud-init must include testing. See :ref:`testing` for +details on these requirements. Type Annotations ---------------- @@ -344,13 +201,6 @@ variable annotations specified in `PEP-526`_ were introduced in Python .. _PEP-484: https://www.python.org/dev/peps/pep-0484/ .. _PEP-526: https://www.python.org/dev/peps/pep-0526/ -.. [#fixture-list] This list of fixtures (with markup) can be - reproduced by running:: - - py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' - - in a xenial lxd container with python3-pytest-catchlog installed. 
- Feature Flags ------------- diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 80d217caad4..0668ffa3962 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -28,11 +28,13 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True) + metavar="PATH", required=True, + help="The network configuration to read") parser.add_argument("-k", "--kind", choices=['eni', 'network_data.json', 'yaml', 'azure-imds', 'vmware-imc'], - required=True) + required=True, + help="The format of the given network config") parser.add_argument("-d", "--directory", metavar="PATH", help="directory to place output in", @@ -50,7 +52,8 @@ def get_parser(parser=None): help='enable debug logging to stderr.') parser.add_argument("-O", "--output-kind", choices=['eni', 'netplan', 'sysconfig'], - required=True) + required=True, + help="The network config format to emit") return parser diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index a5446da778d..baf1381feb2 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 73d8719f392..bb8a1278097 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -389,7 +389,7 @@ PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el'] +PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] def get_default_mirrors(arch=None, target=None): diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 3c453d91ca9..bd7bead9410 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -25,7 +25,7 @@ **Module frequency:** per instance -**Supported distros:** alpine, debian, ubuntu +**Supported distros:** alpine, debian, ubuntu, rhel **Config keys**:: @@ -44,60 +44,104 @@ from cloudinit import subp from cloudinit import util -CA_CERT_PATH = "/usr/share/ca-certificates/" -CA_CERT_FILENAME = "cloud-init-ca-certs.crt" -CA_CERT_CONFIG = "/etc/ca-certificates.conf" -CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" -CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) +DEFAULT_CONFIG = { + 'ca_cert_path': '/usr/share/ca-certificates/', + 'ca_cert_filename': 'cloud-init-ca-certs.crt', + 'ca_cert_config': '/etc/ca-certificates.conf', + 'ca_cert_system_path': '/etc/ssl/certs/', + 'ca_cert_update_cmd': ['update-ca-certificates'] +} +DISTRO_OVERRIDES = { + 'rhel': { + 'ca_cert_path': '/usr/share/pki/ca-trust-source/', + 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', + 'ca_cert_config': None, + 'ca_cert_system_path': '/etc/pki/ca-trust/', + 'ca_cert_update_cmd': ['update-ca-trust'] + } +} -distros = ['alpine', 'debian', 'ubuntu'] +distros = ['alpine', 'debian', 'ubuntu', 'rhel'] -def update_ca_certs(): + +def _distro_ca_certs_configs(distro_name): + """Return a distro-specific ca_certs config dictionary + + @param distro_name: String providing the distro class name. + @returns: Dict of distro configurations for ca-cert. 
+ """ + cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) + cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], + cfg['ca_cert_filename']) + return cfg + + +def update_ca_certs(distro_cfg): """ Updates the CA certificate cache on the current machine. + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(["update-ca-certificates"], capture=False) + subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) -def add_ca_certs(certs): +def add_ca_certs(distro_cfg, certs): """ Adds certificates to the system. To actually apply the new certificates you must also call L{update_ca_certs}. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. @param certs: A list of certificate strings. """ - if certs: - # First ensure they are strings... - cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644) - - if os.stat(CA_CERT_CONFIG).st_size == 0: - # If the CA_CERT_CONFIG file is empty (i.e. all existing - # CA certs have been deleted) then simply output a single - # line with the cloud-init cert filename. - out = "%s\n" % CA_CERT_FILENAME - else: - # Append cert filename to CA_CERT_CONFIG file. - # We have to strip the content because blank lines in the file - # causes subsequent entries to be ignored. (LP: #1077020) - orig = util.load_file(CA_CERT_CONFIG) - cur_cont = '\n'.join([line for line in orig.splitlines() - if line != CA_CERT_FILENAME]) - out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) - util.write_file(CA_CERT_CONFIG, out, omode="wb") - - -def remove_default_ca_certs(distro_name): + if not certs: + return + # First ensure they are strings... + cert_file_contents = "\n".join([str(c) for c in certs]) + util.write_file(distro_cfg['ca_cert_full_path'], + cert_file_contents, + mode=0o644) + update_cert_config(distro_cfg) + + +def update_cert_config(distro_cfg): + """ + Update Certificate config file to add the file path managed cloud-init + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. + """ + if distro_cfg['ca_cert_config'] is None: + return + if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + # If the CA_CERT_CONFIG file is empty (i.e. all existing + # CA certs have been deleted) then simply output a single + # line with the cloud-init cert filename. + out = "%s\n" % distro_cfg['ca_cert_filename'] + else: + # Append cert filename to CA_CERT_CONFIG file. + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. (LP: #1077020) + orig = util.load_file(distro_cfg['ca_cert_config']) + cr_cont = '\n'.join([line for line in orig.splitlines() + if line != distro_cfg['ca_cert_filename']]) + out = "%s\n%s\n" % (cr_cont.rstrip(), + distro_cfg['ca_cert_filename']) + util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + + +def remove_default_ca_certs(distro_name, distro_cfg): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. + + @param distro_name: String providing the distro class name. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. 
""" - util.delete_dir_contents(CA_CERT_PATH) - util.delete_dir_contents(CA_CERT_SYSTEM_PATH) - util.write_file(CA_CERT_CONFIG, "", mode=0o644) + util.delete_dir_contents(distro_cfg['ca_cert_path']) + util.delete_dir_contents(distro_cfg['ca_cert_system_path']) + util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) - if distro_name != 'alpine': + if distro_name in ['debian', 'ubuntu']: debconf_sel = ( "ca-certificates ca-certificates/trust_new_crts " + "select no") subp.subp(('debconf-set-selections', '-'), debconf_sel) @@ -120,22 +164,23 @@ def handle(name, cfg, cloud, log, _args): return ca_cert_cfg = cfg['ca-certs'] + distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.distro.name) + remove_default_ca_certs(cloud.distro.name, distro_cfg) # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(trusted_certs) + add_ca_certs(distro_cfg, trusted_certs) # Update the system with the new cert configuration. log.debug("Updating certificates") - update_ca_certs() + update_ca_certs(distro_cfg) # vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a930e6127c7..99a4bae4597 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -396,6 +396,13 @@ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): # Only IPv6 is DHCP, IPv4 may be static iface_cfg['BOOTPROTO'] = 'dhcp6' iface_cfg['DHCLIENT6_MODE'] = 'managed' + # only if rhel AND dhcpv6 stateful + elif (flavor == 'rhel' and + subnet_type == 'ipv6_dhcpv6-stateful'): + iface_cfg['BOOTPROTO'] = 'dhcp' + iface_cfg['DHCPV6C'] = True + iface_cfg['IPV6INIT'] = True + iface_cfg['IPV6_AUTOCONF'] = False else: iface_cfg['IPV6INIT'] = True # Configure network settings using DHCPv6 diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 04ff2131d35..090dd66b897 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -651,6 +651,10 @@ def get_public_ssh_keys(self): LOG.debug('Retrieving public SSH keys') ssh_keys = [] try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable ssh_keys = [ public_key['keyData'] for public_key @@ -983,6 +987,7 @@ def _wait_for_all_nics_ready(self): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" @@ -1271,6 +1276,10 @@ def _negotiate(self): pubkey_info = None try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable public_keys = self.metadata['imds']['compute']['publicKeys'] LOG.debug( 'Successfully retrieved %s key(s) from IMDS', @@ -1969,6 +1978,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: netconfig = {'version': 2, 'ethernets': {}} network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + has_ip_address = False # First IPv4 and/or IPv6 address will be obtained via DHCP. # Any additional IPs of each type will be set as static # addresses. 
@@ -1978,6 +1988,11 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: 'dhcp6': False} for addr_type in ('ipv4', 'ipv6'): addresses = intf.get(addr_type, {}).get('ipAddress', []) + # If there are no available IP addresses, then we don't + # want to add this interface to the generated config. + if not addresses: + continue + has_ip_address = True if addr_type == 'ipv4': default_prefix = '24' else: @@ -1998,7 +2013,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: dev_config['addresses'].append( '{ip}/{prefix}'.format( ip=privateIp, prefix=netPrefix)) - if dev_config: + if dev_config and has_ip_address: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update({ 'match': {'macaddress': mac.lower()}, diff --git a/debian/changelog b/debian/changelog index 326d5621a6e..790a437e851 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,69 @@ +cloud-init (20.4-55-g4f62ae8d-0ubuntu1) hirsute; urgency=medium + + * New upstream snapshot. + - Fix regression with handling of IMDS ssh keys (#760) [Thomas Stringer] + - integration_tests: log cloud-init version in SUT (#758) + - Add ajmyyra as contributor (#742) [Antti Myyrä] + - net_convert: add some missing help text (#755) + - Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL + (#753) [Eduardo Otubo] + - doc: document missing IPv6 subnet types (#744) [Antti Myyrä] + - Add example configuration for datasource `AliYun` (#751) [Xiaoyu Zhong] + - integration_tests: add SSH key selection settings (#754) + - fix a typo in man page cloud-init.1 (#752) [Amy Chen] + - network-config-format-v2.rst: add Netplan Passthrough section (#750) + - stale: re-enable post holidays (#749) + - integration_tests: port ca_certs tests from cloud_tests (#732) + - Azure: Add telemetry for poll IMDS (#741) [Johnson Shi] + - doc: move testing section from HACKING to its own doc (#739) + - No longer allow integration test failures on travis (#738) + - stale: fix error in definition (#740) + - integration_tests: set log-cli-level to INFO by default (#737) + - PULL_REQUEST_TEMPLATE.md: use backticks around commit message (#736) + - stale: disable check for holiday break (#735) + - integration_tests: log the path we collect logs into (#733) + - .travis.yml: add (most) supported Python versions to CI (#734) + - integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731) + - cc_ca_certs: add RHEL support (#633) [cawamata] + - Azure: only generate config for NICs with addresses (#709) + [Thomas Stringer] + - doc: fix CloudStack configuration example (#707) [Olivier Lemasle] + - integration_tests: restrict test_lxd_bridge appropriately (#730) + - Add integration tests for CLI functionality (#729) + - Integration test for gh-626 (#728) + - Some test_upgrade fixes (#726) + - Ensure overriding test vars with env vars works for booleans (#727) + - integration_tests: port lxd_bridge test from cloud_tests (#718) + - Integration test for gh-632. 
(#725) + - Integration test for gh-671 (#724) + - integration-requirements.txt: bump pycloudlib commit (#723) + - Drop unnecessary shebang from cmd/main.py (#722) [Eduardo Otubo] + - Integration test for LP: #1813396 and #669 (#719) + - integration_tests: include timestamp in log output (#720) + - integration_tests: add test for LP: #1898997 (#713) + - Add integration test for power_state_change module (#717) + - Update documentation for network-config-format-v2 (#701) [ggiesen] + - sandbox CA Cert tests to not require ca-certificates (#715) + [Eduardo Otubo] + - Add upgrade integration test (#693) + - Integration test for 570 (#712) + - Add ability to keep snapshotted images in integration tests (#711) + - Integration test for pull #586 (#706) + - integration_tests: introduce skipping of tests by OS (#702) + - integration_tests: introduce IntegrationInstance.restart (#708) + - Add lxd-vm to list of valid integration test platforms (#705) + - Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL + (#685) [Eduardo Otubo] + - Delete image snapshots created for integration tests (#682) + - Parametrize ssh_keys_provided integration test (#700) [lucasmoura] + - Drop use_sudo attribute on IntegrationInstance (#694) [lucasmoura] + - cc_apt_configure: add riscv64 as a ports arch (#687) + [Dimitri John Ledkov] + - cla: add xnox (#692) [Dimitri John Ledkov] + - Collect logs from integration test runs (#675) + + -- Daniel Watkins Mon, 11 Jan 2021 16:57:30 -0500 + cloud-init (20.4-0ubuntu2) hirsute; urgency=medium * d/cloud-init.manpages: include upstream manpages in package (LP: #1908548) diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1 index 9b52dc8d85b..3fde41483ba 100644 --- a/doc/man/cloud-init.1 +++ b/doc/man/cloud-init.1 @@ -10,7 +10,7 @@ cloud-init \- Cloud instance initialization Cloud-init provides a mechanism for cloud instance initialization. This is done by identifying the cloud platform that is in use, reading provided cloud metadata and optional vendor and user -data, and then intializing the instance as requested. +data, and then initializing the instance as requested. Generally, this command is not normally meant to be run directly by the user. However, some subcommands may useful for development or diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index ddcb0b31f73..10e8228fe55 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -75,6 +75,7 @@ Having trouble? We would like to help! topics/dir_layout.rst topics/analyze.rst topics/docs.rst + topics/testing.rst topics/integration_tests.rst topics/cloud_tests.rst diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst index 3f4f40cad2c..587bd4f4bc0 100644 --- a/doc/rtd/topics/datasources/aliyun.rst +++ b/doc/rtd/topics/datasources/aliyun.rst @@ -12,6 +12,21 @@ The Alibaba Cloud metadata service is available at the well known url Alibaba Cloud ECS on `metadata `__. +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). + +An example configuration with the default values is provided below: + +.. 
sourcecode:: yaml + + datasource: + AliYun: + metadata_urls: ["http://100.100.100.200"] + timeout: 50 + max_wait: 120 + Versions ^^^^^^^^ Like the EC2 metadata service, Alibaba Cloud's metadata service provides diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index a24de34fcfa..325aeeaf503 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -46,8 +46,6 @@ An example configuration with the default values is provided below: CloudStack: max_wait: 120 timeout: 50 - datasource_list: - - CloudStack .. _Apache CloudStack: http://cloudstack.apache.org/ diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst index 0d416f32f8c..fb3006fe414 100644 --- a/doc/rtd/topics/debugging.rst +++ b/doc/rtd/topics/debugging.rst @@ -1,6 +1,6 @@ -******************************** -Testing and debugging cloud-init -******************************** +******************** +Debugging cloud-init +******************** Overview ======== diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst index aeda326cab4..6c124ad919e 100644 --- a/doc/rtd/topics/integration_tests.rst +++ b/doc/rtd/topics/integration_tests.rst @@ -9,11 +9,41 @@ Overview Integration tests are written using pytest and are located at ``tests/integration_tests``. General design principles -laid out in :ref:`unit_testing` should be followed for integration tests. +laid out in :ref:`testing` should be followed for integration tests. Setup is accomplished via a set of fixtures located in ``tests/integration_tests/conftest.py``. +Image Selection +=============== + +Each integration testing run uses a single image as its basis. This +image is configured using the ``OS_IMAGE`` variable; see +:ref:`Configuration` for details of how configuration works. + +``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g. +"focal"), or an image specification. If an Ubuntu series name is +given, then the most recent image for that series on the target cloud +will be used. For other use cases, an image specification is used. + +In its simplest form, an image specification can simply be a cloud's +image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the +image so-identified will be used as the basis for this testing run. + +This has a drawback, however: as we do not know what OS or release is +within the image, the integration testing framework will run *all* +tests against the image in question. If it's a RHEL8 image, then we +would expect Ubuntu-specific tests to fail (and vice versa). + +To address this, a full image specification can be given. This is of +the form: ``[::[:: ``assert + mock.call(*args, **kwargs) in m.call_args_list`` + * ``m.assert_called()`` => ``assert 0 != m.call_count`` + * ``m.assert_called_once()`` => ``assert 1 == m.call_count`` + * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert + [mock.call(*args, **kwargs)] == m.call_args_list`` + * ``m.assert_called_with(*args, **kwargs)`` => ``assert + mock.call(*args, **kwargs) == m.call_args_list[-1]`` + * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in + call_list: assert call in m.call_args_list`` + + * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(..., + any_order=False)`` are not easily replicated in a single + statement, so their use when appropriate is acceptable. 
+ + * ``m.assert_not_called()`` => ``assert 0 == m.call_count`` + +* When there are multiple patch calls in a test file for the module it + is testing, it may be desirable to capture the shared string prefix + for these patch calls in a module-level variable. If used, such + variables should be named ``M_PATH`` or, for datasource tests, + ``DS_PATH``. + +Test Argument Ordering +---------------------- + +* Test arguments should be ordered as follows: + + * ``mock.patch`` arguments. When used as a decorator, ``mock.patch`` + partially applies its generated ``Mock`` object as the first + argument, so these arguments must go first. + * ``pytest.mark.parametrize`` arguments, in the order specified to + the ``parametrize`` decorator. These arguments are also provided + by a decorator, so it's natural that they sit next to the + ``mock.patch`` arguments. + * Fixture arguments, alphabetically. These are not provided by a + decorator, so they are last, and their order has no defined + meaning, so we default to alphabetical. + +* It follows from this ordering of test arguments (so that we retain + the property that arguments left-to-right correspond to decorators + bottom-to-top) that test decorators should be ordered as follows: + + * ``pytest.mark.parametrize`` + * ``mock.patch`` + +.. [#fixture-list] This list of fixtures (with markup) can be + reproduced by running:: + + py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' + + in a xenial lxd container with python3-pytest-catchlog installed. + +.. _pytest: https://docs.pytest.org/ +.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html +.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15 +.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9 +.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html +.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 +.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param +.. 
_autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing diff --git a/integration-requirements.txt b/integration-requirements.txt index 3648a0f16d2..ec765763003 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4b8d2cd5ac6316810ce16d081842da575625ca4f +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@72e800b8e99c5b735348c4778f19f92cf6c63de0 pytest diff --git a/tests/integration_tests/assets/__init__.py b/tests/integration_tests/assets/__init__.py new file mode 100644 index 00000000000..0cf2798223f --- /dev/null +++ b/tests/integration_tests/assets/__init__.py @@ -0,0 +1,16 @@ +import os +from collections import namedtuple +from pathlib import Path + +ASSET_DIR = Path(os.path.dirname(os.path.realpath(__file__))) +PRIVATE_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa' +PUBLIC_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa.pub' + + +def get_test_rsa_keypair(): + with PUBLIC_RSA_KEY_PATH.open() as public_file: + public_key = public_file.read() + with PRIVATE_RSA_KEY_PATH.open() as private_file: + private_key = private_file.read() + KeyPair = namedtuple('KeyPair', 'public_rsa_key private_rsa_key') + return KeyPair(public_key, private_key) diff --git a/tests/integration_tests/assets/test_id_rsa b/tests/integration_tests/assets/test_id_rsa new file mode 100755 index 00000000000..1ef67135951 --- /dev/null +++ b/tests/integration_tests/assets/test_id_rsa @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAnY0A/9s891CKmS3CBVGgLjdEifgYf8y3Ht+TC3+ajvEIcEoxevgK +t2mXoPw4P/5QrT4QmgG5N2Jr4Q5eNcgZ0zytoAlNxcM/R/soKi46hiLSArMNEiOlVi/vbf +q3Du5XIiX8fZD+4xgq3+jQ3PMxzqBf3AU6RAC9Jj94pBHCbaKPSFfWU7fq/JIvukKIWckv +UCkzNiaAWop3jNSEwFZbqKe2A1yesfgSpCmycggOVN/tMpFeVn8rzzG+LJ47TSoBav1P0F +CegVKq5iyGhLdM4TnS3ajbSIf3+SSDMImyzzmXqPyG5zwcH8zuEI3aR9DTMmi49HnjVX0A +N+61iQf8MEz9nnpUCnaeogiI4zfJQIMKHijGcFYR092BqJUmy50s239nFFBzCFRcmERdyh +fxrrG5kpEPllPGO4AuCtR4aW9yVTqfDNO2dfX5xvF8ZakUCMdR7JomuzW097U8zHE6EOTI +XVXatlI1cHt+KeugVrjzK2YNE1HEeGCt4l7qmpAXAAAFgL+LUj+/i1I/AAAAB3NzaC1yc2 +EAAAGBAJ2NAP/bPPdQipktwgVRoC43RIn4GH/Mtx7fkwt/mo7xCHBKMXr4Crdpl6D8OD/+ +UK0+EJoBuTdia+EOXjXIGdM8raAJTcXDP0f7KCouOoYi0gKzDRIjpVYv7236tw7uVyIl/H +2Q/uMYKt/o0NzzMc6gX9wFOkQAvSY/eKQRwm2ij0hX1lO36vySL7pCiFnJL1ApMzYmgFqK +d4zUhMBWW6intgNcnrH4EqQpsnIIDlTf7TKRXlZ/K88xviyeO00qAWr9T9BQnoFSquYsho +S3TOE50t2o20iH9/kkgzCJss85l6j8huc8HB/M7hCN2kfQ0zJouPR541V9ADfutYkH/DBM +/Z56VAp2nqIIiOM3yUCDCh4oxnBWEdPdgaiVJsudLNt/ZxRQcwhUXJhEXcoX8a6xuZKRD5 +ZTxjuALgrUeGlvclU6nwzTtnX1+cbxfGWpFAjHUeyaJrs1tPe1PMxxOhDkyF1V2rZSNXB7 +finroFa48ytmDRNRxHhgreJe6pqQFwAAAAMBAAEAAAGALgAgfZPGnjMu9ICOuLzXdwb+BQ +aiKJZeFS6UIXRVbUzk+NxAzDWl811qPz/FMLIRXjPT5xN/v7MF6oUmbq+JEssRqrtssMRM +MrkbRg2PWuDJzq32sAgmWx7N2p+sWTivyjGrIgJ22VmSEyRH72s2bK0YsAX6uCY7E/LOR6 +FD0nz3Ntkmo/T8MFiChPCuHQEHxnDxGett6IGrXDwksn/EbV7iXuLpFu9mifX+uxqtDI0B +FZWqJLkm0m0kqKReji4oHIUJXw6yQrVYTy71WZ/VXzJ+B7w/WvFGJjAvZQSrwzElRfH8Th +3knN6csSvC2sGCx5Dqi7fxpL4nZsYdGyB8siIxbvye4DUO8RXMjfJP9R+HMsBd4tSxjzLE +lVNBrClJt2yt1JbuAwzsZpbk5jvJ2PqupqQX+WLrJ+i66nByUDCxtXuHG+e3FnATdcLtSv +QvpuzSN3pfurK/C/J3kf6BTF9avK+bgw+MhbD3Ct4Lqi9s3VSgBQQCp7Hr67JEv4jZAAAA +wGTnfuIb3F3p/NhEudAucWkXEbV9zsnIAtSoguSh7fPC21MGyfx6chsf4mXKHUNfuw8m5W +2+tYa18af7Tp2nHWHPmWbY1z+g/DFCMCPaEbi1zffMlcTw7OZ8eV6D4isD30ul+eVguSYE 
+Hw+ILgWQXVen0oFtM1FOHiIPtB8CWyaX6njyLRQU9dt3qcWBm/tocy4hTSfheeU21pupEw +6/kfvqkq8J5MYTWF9yE1dNGEuoV2XpidX4ell/oH0qEPy4vQAAAMEAzZlbUVJ8OimOLivJ +J0KYJD/QDPMk1bDsHi3MhBnWYEJ4t78L63aG2CAIy9DazueJH7S7h1N0Wbx5v0gSGA1gb2 +8YaEeUJDrrV7I1IbG9Gu4/Swfm4YAE+1mwK54CE5Nkefd+nz7jxLQSRUNO+AB3ldRpgqXV +CGT+rkhF3wY9D4dpzRl9KQiVKIQLfqyUlO5PHWEGKh3gPRbHUv8GWSA463L3aSl26cnAmv +CE++Ssusa9cIczTO7H+BsFc7MpWJKrAAAAwQDELFCX4ES/0HcVlTH3IjoRtIvPMb6lq545 +hPXwOb9ZpBmrgd5byZ0cEkjgEklNUZ9A9QIxrPNlVsB+z8QU+sV1qsExrPA9Fo8t9vFApi +u6WFR5QbIl9A2fh+2wZR2i0ftsndGdy9zA4LBiBj4EzQYYG7PEzSHIvyGGc0iaG4YCP+D0 +ljbVv47DruKa3RYv8Feuevo2hhRBRLnwah8cqXyehprqwA96WOoRXZVPgz7LQHKKAVYKWY +THmWZfry/xGEUAAAAKamFtZXNAbmV3dAE= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/test_id_rsa.pub b/tests/integration_tests/assets/test_id_rsa.pub new file mode 100644 index 00000000000..c14c3ea5b78 --- /dev/null +++ b/tests/integration_tests/assets/test_id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCdjQD/2zz3UIqZLcIFUaAuN0SJ+Bh/zLce35MLf5qO8QhwSjF6+Aq3aZeg/Dg//lCtPhCaAbk3YmvhDl41yBnTPK2gCU3Fwz9H+ygqLjqGItICsw0SI6VWL+9t+rcO7lciJfx9kP7jGCrf6NDc8zHOoF/cBTpEAL0mP3ikEcJtoo9IV9ZTt+r8ki+6QohZyS9QKTM2JoBaineM1ITAVluop7YDXJ6x+BKkKbJyCA5U3+0ykV5WfyvPMb4snjtNKgFq/U/QUJ6BUqrmLIaEt0zhOdLdqNtIh/f5JIMwibLPOZeo/IbnPBwfzO4QjdpH0NMyaLj0eeNVfQA37rWJB/wwTP2eelQKdp6iCIjjN8lAgwoeKMZwVhHT3YGolSbLnSzbf2cUUHMIVFyYRF3KF/GusbmSkQ+WU8Y7gC4K1Hhpb3JVOp8M07Z19fnG8XxlqRQIx1Hsmia7NbT3tTzMcToQ5MhdVdq2UjVwe34p66BWuPMrZg0TUcR4YK3iXuqakBc= test@example diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py new file mode 100644 index 00000000000..b8866edd968 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh570.py @@ -0,0 +1,38 @@ +"""Integration test for #570. + +Test that we can add optional vendor-data to the seedfrom file in a +NoCloud environment +""" + +from tests.integration_tests.instances import IntegrationInstance +import pytest + +VENDOR_DATA = """\ +#cloud-config +runcmd: + - touch /var/tmp/seeded_vendordata_test_file +""" + + +# Only running on LXD because we need NoCloud for this test +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +def test_nocloud_seedfrom_vendordata(client: IntegrationInstance): + seed_dir = '/var/tmp/test_seed_dir' + result = client.execute( + "mkdir {seed_dir} && " + "touch {seed_dir}/user-data && " + "touch {seed_dir}/meta-data && " + "echo 'seedfrom: {seed_dir}/' > " + "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir) + ) + assert result.return_code == 0 + + client.write_to_file( + '{}/vendor-data'.format(seed_dir), + VENDOR_DATA, + ) + client.execute('cloud-init clean --logs') + client.restart(raise_on_cloudinit_failure=True) + assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp') diff --git a/tests/integration_tests/bugs/test_gh586.py b/tests/integration_tests/bugs/test_gh586.py new file mode 100644 index 00000000000..44b643f16ce --- /dev/null +++ b/tests/integration_tests/bugs/test_gh586.py @@ -0,0 +1,42 @@ +"""Integration test for pull #586 + +If a non-default AuthorizedKeysFile is specified in /etc/ssh/sshd_config, +ensure we can still ssh as expected. 
+""" +import paramiko +import pytest +from io import StringIO +from tests.integration_tests.assets import get_test_rsa_keypair + + +public_rsa_key, private_rsa_key = get_test_rsa_keypair() +USER_DATA = """\ +#cloud-config +bootcmd: + - sed -i 's/#AuthorizedKeysFile.*/AuthorizedKeysFile\\ .ssh\\/authorized_keys2/' /etc/ssh/sshd_config +ssh_authorized_keys: + - {public_key} +""".format(public_key=public_rsa_key) # noqa: E501 + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(USER_DATA) +def test_non_default_authorized_keys(client): + sshd = client.read_from_file('/etc/ssh/sshd_config') + assert 'AuthorizedKeysFile .ssh/authorized_keys2' in sshd + assert sshd.count('AuthorizedKeysFile') == 1 + + ssh_dir = client.execute('ls /home/ubuntu/.ssh').stdout + assert 'authorized_keys2' in ssh_dir + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + paramiko_key = paramiko.RSAKey.from_private_key(StringIO(private_rsa_key)) + + # Will fail with paramiko.ssh_exception.AuthenticationException + # if this bug isn't fixed + ssh.connect( + client.instance.ip, + username=client.instance.username, + pkey=paramiko_key, + ) diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py new file mode 100644 index 00000000000..2d336462b97 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh626.py @@ -0,0 +1,42 @@ +"""Integration test for gh-626. + +Ensure if wakeonlan is specified in the network config that it is rendered +in the /etc/network/interfaces or netplan config. +""" + +import pytest +import yaml + +from tests.integration_tests.clouds import ImageSpecification +from tests.integration_tests.instances import IntegrationInstance + + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + dhcp4: true + wakeonlan: true +""" + +EXPECTED_ENI_END = """\ +iface eth0 inet dhcp + ethernet-wol g""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + 'user.network-config': NETWORK_CONFIG +}) +def test_wakeonlan(client: IntegrationInstance): + if ImageSpecification.from_os_image().release == 'xenial': + eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg') + assert eni.endswith(EXPECTED_ENI_END) + return + + netplan_cfg = client.execute('cat /etc/netplan/50-cloud-init.yaml') + netplan_yaml = yaml.safe_load(netplan_cfg) + assert 'wakeonlan' in netplan_yaml['network']['ethernets']['eth0'] + assert netplan_yaml['network']['ethernets']['eth0']['wakeonlan'] is True diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py new file mode 100644 index 00000000000..cf166356ebd --- /dev/null +++ b/tests/integration_tests/bugs/test_gh632.py @@ -0,0 +1,30 @@ +"""Integration test for gh-632. + +Verify that if cloud-init is using DataSourceRbxCloud, there is +no traceback if the metadata disk cannot be found. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +@pytest.mark.sru_2020_11 +def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): + client.write_to_file( + '/etc/cloud/cloud.cfg.d/90_dpkg.cfg', + 'datasource_list: [ RbxCloud, NoCloud ]\n', + ) + client.write_to_file( + '/etc/cloud/ds-identify.cfg', + 'policy: enabled\n', + ) + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + assert 'WARNING' not in log + assert 'Traceback' not in log + assert 'Failed to load metadata and userdata' not in log + assert ("Getting data from failed") not in log diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py new file mode 100644 index 00000000000..5e90cddaff9 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh671.py @@ -0,0 +1,55 @@ +"""Integration test for gh-671. + +Verify that on Azure that if a default user and password are specified +through the Azure API that a change in the default password overwrites +the old password +""" + +import crypt + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud + +OLD_PASSWORD = 'DoIM33tTheComplexityRequirements!??' +NEW_PASSWORD = 'DoIM33tTheComplexityRequirementsNow!??' + + +def _check_password(instance, unhashed_password): + shadow_password = instance.execute('getent shadow ubuntu').split(':')[1] + salt = shadow_password.rsplit('$', 1)[0] + hashed_password = crypt.crypt(unhashed_password, salt) + assert shadow_password == hashed_password + + +@pytest.mark.azure +@pytest.mark.sru_2020_11 +def test_update_default_password(setup_image, session_cloud: IntegrationCloud): + os_profile = { + 'os_profile': { + 'admin_password': '', + 'linux_configuration': { + 'disable_password_authentication': False + } + } + } + os_profile['os_profile']['admin_password'] = OLD_PASSWORD + instance1 = session_cloud.launch(launch_kwargs={'vm_params': os_profile}) + + _check_password(instance1, OLD_PASSWORD) + + snapshot_id = instance1.cloud.cloud_instance.snapshot( + instance1.instance, + delete_provisioned_user=False + ) + + os_profile['os_profile']['admin_password'] = NEW_PASSWORD + try: + with session_cloud.launch(launch_kwargs={ + 'image_id': snapshot_id, + 'vm_params': os_profile, + }) as instance2: + _check_password(instance2, NEW_PASSWORD) + finally: + session_cloud.cloud_instance.delete_image(snapshot_id) + instance1.destroy() diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py new file mode 100644 index 00000000000..7ad0e809346 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -0,0 +1,34 @@ +"""Integration test for lp-1813396 + +Ensure gpg is called with no tty flag. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import verify_ordered_items_in_text + + +USER_DATA = """\ +#cloud-config +apt: + sources: + cloudinit: + source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main' + keyserver: keyserver.ubuntu.com + keyid: E4D304DF +""" # noqa: E501 + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(USER_DATA) +def test_gpg_no_tty(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + to_verify = [ + "Running command ['gpg', '--no-tty', " + "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] " + "with allowed return codes [0] (shell=False, capture=True)", + "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'", + "finish: modules-config/config-apt-configure: SUCCESS", + ] + verify_ordered_items_in_text(to_verify, log) diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py new file mode 100644 index 00000000000..54c88d82c54 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -0,0 +1,71 @@ +"""Integration test for LP: #1898997 + +cloud-init was incorrectly excluding Open vSwitch bridge members from its list +of interfaces. This meant that instances which had only one interface which +was in an Open vSwitch bridge would not boot correctly: cloud-init would not +find the expected physical interfaces, so would not apply network config. + +This test checks that cloud-init believes it has successfully applied the +network configuration, and confirms that the bridge can be used to ping the +default gateway. +""" +import pytest + +MAC_ADDRESS = "de:ad:be:ef:12:34" + + +NETWORK_CONFIG = """\ +bridges: + ovs-br: + dhcp4: true + interfaces: + - enp5s0 + macaddress: 52:54:00:d9:08:1c + mtu: 1500 + openvswitch: {{}} +ethernets: + enp5s0: + mtu: 1500 + set-name: enp5s0 + match: + macaddress: {} +version: 2 +""".format(MAC_ADDRESS) + + +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, +}) +@pytest.mark.lxd_vm +@pytest.mark.not_bionic +@pytest.mark.not_xenial +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +class TestInterfaceListingWithOpenvSwitch: + def test_ovs_member_interfaces_not_excluded(self, client): + # We need to install openvswitch for our provided network configuration + # to apply (on next boot), so DHCP on our default interface to fetch it + client.execute("dhclient enp5s0") + client.execute("apt update -qqy") + client.execute("apt-get install -qqy openvswitch-switch") + + # Now our networking config should successfully apply on a clean reboot + client.execute("cloud-init clean --logs") + client.restart() + + cloudinit_output = client.read_from_file("/var/log/cloud-init.log") + + # Confirm that the network configuration was applied successfully + assert "WARN" not in cloudinit_output + # Confirm that the applied network config created the OVS bridge + assert "ovs-br" in client.execute("ip addr") + + # Test that we can ping our gateway using our bridge + gateway = client.execute( + "ip -4 route show default | awk '{ print $3 }'" + ) + ping_result = client.execute( + "ping -c 1 -W 1 -I ovs-br {}".format(gateway) + ) + assert ping_result.ok diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py index 3fe7d0d0fe0..395cace0999 100644 --- a/tests/integration_tests/bugs/test_lp1900837.py +++ 
b/tests/integration_tests/bugs/test_lp1900837.py @@ -22,7 +22,7 @@ def test_permissions_unchanged(self, client): assert "600" == _get_log_perms(client) # Reboot - client.instance.restart() + client.restart(raise_on_cloudinit_failure=True) # Check that permissions are not reset on reboot assert "600" == _get_log_perms(client) diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 88ac44081d5..72d770581cf 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -6,7 +6,7 @@ from pycloudlib.lxd.instance import LXDInstance import cloudinit -from cloudinit.subp import subp +from cloudinit.subp import subp, ProcessExecutionError from tests.integration_tests import integration_settings from tests.integration_tests.instances import ( IntegrationEc2Instance, @@ -25,6 +25,65 @@ log = logging.getLogger('integration_testing') +def _get_ubuntu_series() -> list: + """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series""" + out = "" + try: + out, _err = subp(["ubuntu-distro-info", "-a"]) + except ProcessExecutionError: + log.info( + "ubuntu-distro-info (from the distro-info package) must be" + " installed to guess Ubuntu os/release" + ) + return out.splitlines() + + +class ImageSpecification: + """A specification of an image to launch for testing. + + If either of ``os`` and ``release`` are not specified, an attempt will be + made to infer the correct values for these on instantiation. + + :param image_id: + The image identifier used by the rest of the codebase to launch this + image. + :param os: + An optional string describing the operating system this image is for + (e.g. "ubuntu", "rhel", "freebsd"). + :param release: + A optional string describing the operating system release (e.g. + "focal", "8"; the exact values here will depend on the OS). + """ + + def __init__( + self, + image_id: str, + os: "Optional[str]" = None, + release: "Optional[str]" = None, + ): + if image_id in _get_ubuntu_series(): + if os is None: + os = "ubuntu" + if release is None: + release = image_id + + self.image_id = image_id + self.os = os + self.release = release + log.info( + "Detected image: image_id=%s os=%s release=%s", + self.image_id, + self.os, + self.release, + ) + + @classmethod + def from_os_image(cls): + """Return an ImageSpecification for integration_settings.OS_IMAGE.""" + parts = integration_settings.OS_IMAGE.split("::", 2) + return cls(*parts) + + class IntegrationCloud(ABC): datasource = None # type: Optional[str] integration_instance_cls = IntegrationInstance @@ -32,7 +91,23 @@ class IntegrationCloud(ABC): def __init__(self, settings=integration_settings): self.settings = settings self.cloud_instance = self._get_cloud_instance() - self.image_id = self._get_initial_image() + if settings.PUBLIC_SSH_KEY is not None: + # If we have a non-default key, use it. + self.cloud_instance.use_key( + settings.PUBLIC_SSH_KEY, name=settings.KEYPAIR_NAME + ) + elif settings.KEYPAIR_NAME is not None: + # Even if we're using the default key, it may still have a + # different name in the clouds, so we need to set it separately. 
+ self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME + self._released_image_id = self._get_initial_image() + self.snapshot_id = None + + @property + def image_id(self): + if self.snapshot_id: + return self.snapshot_id + return self._released_image_id def emit_settings_to_log(self) -> None: log.info( @@ -50,21 +125,20 @@ def _get_cloud_instance(self): raise NotImplementedError def _get_initial_image(self): - image_id = self.settings.OS_IMAGE + image = ImageSpecification.from_os_image() try: - image_id = self.cloud_instance.released_image( - self.settings.OS_IMAGE) + return self.cloud_instance.released_image(image.image_id) except (ValueError, IndexError): - pass - return image_id + return image.image_id def _perform_launch(self, launch_kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) return pycloudlib_instance - def launch(self, user_data=None, launch_kwargs=None, + def launch(self, user_data=None, launch_kwargs=None, wait=True, settings=integration_settings): + if launch_kwargs is None: + launch_kwargs = {} if self.settings.EXISTING_INSTANCE_ID: log.info( 'Not launching instance due to EXISTING_INSTANCE_ID. ' @@ -73,13 +147,15 @@ def launch(self, user_data=None, launch_kwargs=None, self.settings.EXISTING_INSTANCE_ID ) return + if 'wait' in launch_kwargs: + raise Exception("Specify 'wait' directly to launch, " + "not in 'launch_kwargs'") kwargs = { 'image_id': self.image_id, 'user_data': user_data, 'wait': False, } - if launch_kwargs: - kwargs.update(launch_kwargs) + kwargs.update(launch_kwargs) log.info( "Launching instance with launch_kwargs:\n{}".format( "\n".join("{}={}".format(*item) for item in kwargs.items()) @@ -87,9 +163,17 @@ def launch(self, user_data=None, launch_kwargs=None, ) pycloudlib_instance = self._perform_launch(kwargs) - + if wait: + pycloudlib_instance.wait(raise_on_cloudinit_failure=False) log.info('Launched instance: %s', pycloudlib_instance) - return self.get_instance(pycloudlib_instance, settings) + instance = self.get_instance(pycloudlib_instance, settings) + if wait: + # If we aren't waiting, we can't rely on command execution here + log.info( + 'cloud-init version: %s', + instance.execute("cloud-init --version") + ) + return instance def get_instance(self, cloud_instance, settings=integration_settings): return self.integration_instance_cls(self, cloud_instance, settings) @@ -100,6 +184,19 @@ def destroy(self): def snapshot(self, instance): return self.cloud_instance.snapshot(instance, clean=True) + def delete_snapshot(self): + if self.snapshot_id: + if self.settings.KEEP_IMAGE: + log.info( + 'NOT deleting snapshot image created for this testrun ' + 'because KEEP_IMAGE is True: %s', self.snapshot_id) + else: + log.info( + 'Deleting snapshot image created for this testrun: %s', + self.snapshot_id + ) + self.cloud_instance.delete_image(self.snapshot_id) + class Ec2Cloud(IntegrationCloud): datasource = 'ec2' @@ -130,7 +227,14 @@ def _get_cloud_instance(self): return Azure(tag='azure-integration-test') def destroy(self): - self.cloud_instance.delete_resource_group() + if self.settings.KEEP_INSTANCE: + log.info( + 'NOT deleting resource group because KEEP_INSTANCE is true ' + 'and deleting resource group would also delete instance. ' + 'Instance and resource group must both be manually deleted.' 
+ ) + else: + self.cloud_instance.delete_resource_group() class OciCloud(IntegrationCloud): @@ -191,7 +295,6 @@ def _perform_launch(self, launch_kwargs): if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': self._mount_source(pycloudlib_instance) pycloudlib_instance.start(wait=False) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) return pycloudlib_instance diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 73b44bfc894..99dd8d9ed52 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -1,18 +1,29 @@ # This file is part of cloud-init. See LICENSE file for license information. +import datetime +import functools import logging -import os import pytest +import os import sys +from tarfile import TarFile from contextlib import contextmanager +from pathlib import Path from tests.integration_tests import integration_settings from tests.integration_tests.clouds import ( + AzureCloud, Ec2Cloud, GceCloud, - AzureCloud, - OciCloud, + ImageSpecification, + IntegrationCloud, LxdContainerCloud, LxdVmCloud, + OciCloud, + _LxdIntegrationCloud, +) +from tests.integration_tests.instances import ( + CloudInitSource, + IntegrationInstance, ) @@ -28,6 +39,9 @@ 'lxd_container': LxdContainerCloud, 'lxd_vm': LxdVmCloud, } +os_list = ["ubuntu"] + +session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S') def pytest_runtest_setup(item): @@ -54,6 +68,18 @@ def pytest_runtest_setup(item): if supported_platforms and current_platform not in supported_platforms: pytest.skip(unsupported_message) + image = ImageSpecification.from_os_image() + current_os = image.os + supported_os_set = set(os_list).intersection(test_marks) + if current_os and supported_os_set and current_os not in supported_os_set: + pytest.skip("Cannot run on OS {}".format(current_os)) + if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE: + pytest.skip('Test marked unstable. Manually remove mark to run it') + + current_release = image.release + if "not_{}".format(current_release) in test_marks: + pytest.skip("Cannot run on release {}".format(current_release)) + # disable_subp_usage is defined at a higher level, but we don't # want it applied here @@ -75,82 +101,137 @@ def session_cloud(): cloud = platforms[integration_settings.PLATFORM]() cloud.emit_settings_to_log() yield cloud - cloud.destroy() + try: + cloud.delete_snapshot() + finally: + cloud.destroy() + +def get_validated_source( + session_cloud: IntegrationCloud, + source=integration_settings.CLOUD_INIT_SOURCE +) -> CloudInitSource: + if source == 'NONE': + return CloudInitSource.NONE + elif source == 'IN_PLACE': + if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: + raise ValueError( + 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') + return CloudInitSource.IN_PLACE + elif source == 'PROPOSED': + return CloudInitSource.PROPOSED + elif source.startswith('ppa:'): + return CloudInitSource.PPA + elif os.path.isfile(str(source)): + return CloudInitSource.DEB_PACKAGE + raise ValueError( + 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source)) -@pytest.fixture(scope='session', autouse=True) -def setup_image(session_cloud): + +@pytest.fixture(scope='session') +def setup_image(session_cloud: IntegrationCloud): """Setup the target environment with the correct version of cloud-init. 
So we can launch instances / run tests with the correct image """ - client = None + + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + return log.info('Setting up environment for %s', session_cloud.datasource) - if integration_settings.CLOUD_INIT_SOURCE == 'NONE': - pass # that was easy - elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE': - if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: - raise ValueError( - 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') - # The mount needs to happen after the instance is created, so - # no further action needed here - elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED': - client = session_cloud.launch() - client.install_proposed_image() - elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'): - client = session_cloud.launch() - client.install_ppa(integration_settings.CLOUD_INIT_SOURCE) - elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)): - client = session_cloud.launch() - client.install_deb() - else: - raise ValueError( - 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format( - integration_settings.CLOUD_INIT_SOURCE)) - if client: - # Even if we're keeping instances, we don't want to keep this - # one around as it was just for image creation - client.destroy() + client = session_cloud.launch() + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() log.info('Done with environment setup') +def _collect_logs(instance: IntegrationInstance, node_id: str, + test_failed: bool): + """Collect logs from remote instance. + + Args: + instance: The current IntegrationInstance to collect logs from + node_id: The pytest representation of this test, E.g.: + tests/integration_tests/test_example.py::TestExample.test_example + test_failed: If test failed or not + """ + if any([ + integration_settings.COLLECT_LOGS == 'NEVER', + integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed + ]): + return + instance.execute( + 'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz') + node_id_path = Path( + node_id + .replace('.py', '') # Having a directory with '.py' would be weird + .replace('::', os.path.sep) # Turn classes/tests into paths + .replace('[', '-') # For parametrized names + .replace(']', '') # For parameterized names + ) + log_dir = Path( + integration_settings.LOCAL_LOG_PATH + ) / session_start_time / node_id_path + log.info("Writing logs to %s", log_dir) + if not log_dir.exists(): + log_dir.mkdir(parents=True) + tarball_path = log_dir / 'cloud-init.tar.gz' + instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path) + + tarball = TarFile.open(str(tarball_path)) + tarball.extractall(path=str(log_dir)) + tarball_path.unlink() + + @contextmanager -def _client(request, fixture_utils, session_cloud): +def _client(request, fixture_utils, session_cloud: IntegrationCloud): """Fixture implementation for the client fixtures. 
Launch the dynamic IntegrationClient instance using any provided userdata, yield to the test, then cleanup """ - user_data = fixture_utils.closest_marker_first_arg_or( - request, 'user_data', None) - name = fixture_utils.closest_marker_first_arg_or( - request, 'instance_name', None + getter = functools.partial( + fixture_utils.closest_marker_first_arg_or, request, default=None ) + user_data = getter('user_data') + name = getter('instance_name') + lxd_config_dict = getter('lxd_config_dict') + launch_kwargs = {} if name is not None: - launch_kwargs = {"name": name} + launch_kwargs["name"] = name + if lxd_config_dict is not None: + if not isinstance(session_cloud, _LxdIntegrationCloud): + pytest.skip("lxd_config_dict requires LXD") + launch_kwargs["config_dict"] = lxd_config_dict + with session_cloud.launch( user_data=user_data, launch_kwargs=launch_kwargs ) as instance: + previous_failures = request.session.testsfailed yield instance + test_failed = request.session.testsfailed - previous_failures > 0 + _collect_logs(instance, request.node.nodeid, test_failed) @pytest.yield_fixture -def client(request, fixture_utils, session_cloud): +def client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs for every test.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='module') -def module_client(request, fixture_utils, session_cloud): +def module_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per module.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='class') -def class_client(request, fixture_utils, session_cloud): +def class_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per class.""" with _client(request, fixture_utils, session_cloud) as client: yield client @@ -180,3 +261,20 @@ def pytest_assertrepr_compare(op, left, right): '"{}" not in cloud-init.log string; unexpectedly found on' " these lines:".format(left) ] + found_lines + + +def pytest_configure(config): + """Perform initial configuration, before the test runs start. + + This hook is only called if integration tests are being executed, so we can + use it to configure defaults for integration testing that differ from the + rest of the tests in the codebase. + + See + https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure + for pytest's documentation. + """ + if "log_cli_level" in config.option and not config.option.log_cli_level: + # If log_cli_level is available in this version of pytest and not set + # to anything, set it to INFO. + config.option.log_cli_level = "INFO" diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 9b13288c696..4321ce07f9e 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +from enum import Enum import logging import os import uuid @@ -25,9 +26,27 @@ def _get_tmp_path(): return '/var/tmp/{}.tmp'.format(tmp_filename) -class IntegrationInstance: - use_sudo = True +class CloudInitSource(Enum): + """Represents the cloud-init image source setting as a defined value. + + Values here represent all possible values for CLOUD_INIT_SOURCE in + tests/integration_tests/integration_settings.py. See that file for an + explanation of these values. 
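+
+    For example, settings values map to members as follows (see
+    ``get_validated_source`` in conftest.py):
+
+        'NONE'                 -> CloudInitSource.NONE
+        'IN_PLACE'             -> CloudInitSource.IN_PLACE  (LXD only)
+        'PROPOSED'             -> CloudInitSource.PROPOSED
+        'ppa:<repository>'     -> CloudInitSource.PPA
+        a path to a local .deb -> CloudInitSource.DEB_PACKAGE
+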
If the value set there can't be parsed into + one of these values, an exception will be raised + """ + NONE = 1 + IN_PLACE = 2 + PROPOSED = 3 + PPA = 4 + DEB_PACKAGE = 5 + + def installs_new_version(self): + if self.name in [self.NONE.name, self.IN_PLACE.name]: + return False + return True + +class IntegrationInstance: def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance, settings=integration_settings): self.cloud = cloud @@ -37,24 +56,35 @@ def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance, def destroy(self): self.instance.delete() - def execute(self, command, *, use_sudo=None) -> Result: + def restart(self, raise_on_cloudinit_failure=False): + """Restart this instance (via cloud mechanism) and wait for boot. + + This wraps pycloudlib's `BaseInstance.restart` to pass + `raise_on_cloudinit_failure=False` to `BaseInstance.wait`, mirroring + our launch behaviour. + """ + self.instance.restart(wait=False) + log.info("Instance restarted; waiting for boot") + self.instance.wait( + raise_on_cloudinit_failure=raise_on_cloudinit_failure + ) + + def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: raise Exception('Root user cannot run unprivileged') - if use_sudo is None: - use_sudo = self.use_sudo return self.instance.execute(command, use_sudo=use_sudo) def pull_file(self, remote_path, local_path): # First copy to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.execute('cp {} {}'.format(remote_path, tmp_path)) - self.instance.pull_file(tmp_path, local_path) + self.instance.execute('cp {} {}'.format(str(remote_path), tmp_path)) + self.instance.pull_file(tmp_path, str(local_path)) def push_file(self, local_path, remote_path): # First push to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.push_file(local_path, tmp_path) - self.execute('mv {} {}'.format(tmp_path, remote_path)) + self.instance.push_file(str(local_path), tmp_path) + self.execute('mv {} {}'.format(tmp_path, str(remote_path))) def read_from_file(self, remote_path) -> str: result = self.execute('cat {}'.format(remote_path)) @@ -83,36 +113,52 @@ def write_to_file(self, remote_path, contents: str): os.unlink(tmp_file.name) def snapshot(self): - return self.cloud.snapshot(self.instance) - - def _install_new_cloud_init(self, remote_script): - self.execute(remote_script) + image_id = self.cloud.snapshot(self.instance) + log.info('Created new image: %s', image_id) + return image_id + + def install_new_cloud_init( + self, + source: CloudInitSource, + take_snapshot=True + ): + if source == CloudInitSource.DEB_PACKAGE: + self.install_deb() + elif source == CloudInitSource.PPA: + self.install_ppa() + elif source == CloudInitSource.PROPOSED: + self.install_proposed_image() + else: + raise Exception( + "Specified to install {} which isn't supported here".format( + source) + ) version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) self.instance.clean() - image_id = self.snapshot() - log.info('Created new image: %s', image_id) - self.cloud.image_id = image_id + if take_snapshot: + snapshot_id = self.snapshot() + self.cloud.snapshot_id = snapshot_id def install_proposed_image(self): log.info('Installing proposed image') remote_script = ( - '{sudo} echo deb "http://archive.ubuntu.com/ubuntu ' + 'echo deb "http://archive.ubuntu.com/ubuntu ' '$(lsb_release -sc)-proposed main" | ' - '{sudo} tee 
/etc/apt/sources.list.d/proposed.list\n' - '{sudo} apt-get update -q\n' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '') - self._install_new_cloud_init(remote_script) + 'tee /etc/apt/sources.list.d/proposed.list\n' + 'apt-get update -q\n' + 'apt-get install -qy cloud-init' + ) + self.execute(remote_script) - def install_ppa(self, repo): + def install_ppa(self): log.info('Installing PPA') remote_script = ( - '{sudo} add-apt-repository {repo} -y && ' - '{sudo} apt-get update -q && ' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '', repo=repo) - self._install_new_cloud_init(remote_script) + 'add-apt-repository {repo} -y && ' + 'apt-get update -q && ' + 'apt-get install -qy cloud-init' + ).format(repo=self.settings.CLOUD_INIT_SOURCE) + self.execute(remote_script) def install_deb(self): log.info('Installing deb package') @@ -122,9 +168,8 @@ def install_deb(self): self.push_file( local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path) - remote_script = '{sudo} dpkg -i {path}'.format( - sudo='sudo' if self.use_sudo else '', path=remote_path) - self._install_new_cloud_init(remote_script) + remote_script = 'dpkg -i {path}'.format(path=remote_path) + self.execute(remote_script) def __enter__(self): return self @@ -151,4 +196,4 @@ class IntegrationOciInstance(IntegrationInstance): class IntegrationLxdInstance(IntegrationInstance): - use_sudo = False + pass diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index a0609f7ec19..6cabf3d8a49 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -1,15 +1,22 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +from distutils.util import strtobool + ################################################################## # LAUNCH SETTINGS ################################################################## # Keep instance (mostly for debugging) when test is finished KEEP_INSTANCE = False +# Keep snapshot image (mostly for debugging) when test is finished +KEEP_IMAGE = False +# Run tests marked as unstable. Expect failures and dragons. +RUN_UNSTABLE = False # One of: # lxd_container +# lxd_vm # azure # ec2 # gce @@ -21,8 +28,11 @@ INSTANCE_TYPE = None # Determines the base image to use or generate new images from. -# Can be the name of the OS if running a stock image, -# otherwise the id of the image being used if using a custom image +# +# This can be the name of an Ubuntu release, or in the format +# [::[::]]. If given, os and release should describe +# the image specified by image_id. (Ubuntu releases are converted to this +# format internally; in this case, to "focal::ubuntu::focal".) OS_IMAGE = 'focal' # Populate if you want to use a pre-launched instance instead of @@ -55,6 +65,28 @@ # A path to a valid package to be uploaded and installed CLOUD_INIT_SOURCE = 'NONE' +# Before an instance is torn down, we run `cloud-init collect-logs` +# and transfer them locally. These settings specify when to collect these +# logs and where to put them on the local filesystem +# One of: +# 'ALWAYS' +# 'ON_ERROR' +# 'NEVER' +COLLECT_LOGS = 'ON_ERROR' +LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs' + +################################################################## +# SSH KEY SETTINGS +################################################################## + +# A path to the public SSH key to use for test runs. 
(Defaults to pycloudlib's +# default behaviour, using ~/.ssh/id_rsa.pub.) +PUBLIC_SSH_KEY = None + +# For clouds which use named keypairs for SSH connection, the name that is used +# for the keypair. (Defaults to pycloudlib's default behaviour.) +KEYPAIR_NAME = None + ################################################################## # GCE SPECIFIC SETTINGS ################################################################## @@ -91,6 +123,12 @@ # Perhaps a bit too hacky, but it works :) current_settings = [var for var in locals() if var.isupper()] for setting in current_settings: - globals()[setting] = os.getenv( + env_setting = os.getenv( 'CLOUD_INIT_{}'.format(setting), globals()[setting] ) + if isinstance(env_setting, str): + try: + env_setting = bool(strtobool(env_setting.strip())) + except ValueError: + pass + globals()[setting] = env_setting diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py new file mode 100644 index 00000000000..40baae7b79d --- /dev/null +++ b/tests/integration_tests/log_utils.py @@ -0,0 +1,11 @@ +def verify_ordered_items_in_text(to_verify: list, text: str): + """Assert all items in list appear in order in text. + + Examples: + verify_ordered_items_in_text(['a', '1'], 'ab1') # passes + verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError + """ + index = 0 + for item in to_verify: + index = text[index:].find(item) + assert index > -1, "Expected item not found: '{}'".format(item) diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py index d2bcc61a7fe..28cbe19ff05 100644 --- a/tests/integration_tests/modules/test_apt_configure_sources_list.py +++ b/tests/integration_tests/modules/test_apt_configure_sources_list.py @@ -40,6 +40,7 @@ @pytest.mark.ci +@pytest.mark.ubuntu class TestAptConfigureSourcesList: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py new file mode 100644 index 00000000000..89c01a9c451 --- /dev/null +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -0,0 +1,91 @@ +"""Integration tests for cc_ca_certs. + +(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.) 
+ +TODO: +* Mark this as running on Debian and Alpine (once we have marks for that) +* Implement testing for the RHEL-specific paths +""" +import os.path + +import pytest + + +USER_DATA = """\ +#cloud-config +ca-certs: + remove-defaults: true + trusted: + - | + -----BEGIN CERTIFICATE----- + MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx + DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP + d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl + bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW + E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz + MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93 + d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl + MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG + 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc + k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ + yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX + RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6 + q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2 + uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S + vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o + 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4 + Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF + z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1 + SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3 + Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT + TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5 + ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3 + DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr + mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf + PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ + 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM + slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L + ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT + Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro + RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586 + CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l + hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i + DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== + -----END CERTIFICATE----- +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(USER_DATA) +class TestCaCerts: + def test_certs_updated(self, class_client): + """Test that /etc/ssl/certs is updated as we expect.""" + root = "/etc/ssl/certs" + filenames = class_client.execute(["ls", "-1", root]).splitlines() + unlinked_files = [] + links = {} + for filename in filenames: + full_path = os.path.join(root, filename) + symlink_target = class_client.execute(["readlink", full_path]) + is_symlink = symlink_target.ok + if is_symlink: + links[filename] = symlink_target + else: + unlinked_files.append(filename) + + assert ["ca-certificates.crt"] == unlinked_files + assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"] + assert ( + "/usr/share/ca-certificates/cloud-init-ca-certs.crt" + == links["cloud-init-ca-certs.pem"] + ) + + def test_cert_installed(self, class_client): + """Test that our specified cert has been installed""" + checksum = class_client.execute( + "sha256sum /etc/ssl/certs/ca-certificates.crt" + ) + assert ( + 
"78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062" + in checksum + ) diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py new file mode 100644 index 00000000000..3f41b34d791 --- /dev/null +++ b/tests/integration_tests/modules/test_cli.py @@ -0,0 +1,45 @@ +"""Integration tests for CLI functionality + +These would be for behavior manually invoked by user from the command line +""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +VALID_USER_DATA = """\ +#cloud-config +runcmd: + - echo 'hi' > /var/tmp/test +""" + +INVALID_USER_DATA = """\ +runcmd: + - echo 'hi' > /var/tmp/test +""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(VALID_USER_DATA) +def test_valid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with valid userdata. + + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert result.ok + assert 'Valid cloud-config: system userdata' == result.stdout.strip() + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(INVALID_USER_DATA) +def test_invalid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with invalid userdata. + + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert not result.ok + assert 'Cloud config schema errors' in result.stderr + assert 'needs to begin with "#cloud-config"' in result.stderr diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py new file mode 100644 index 00000000000..cbf111795e4 --- /dev/null +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -0,0 +1,48 @@ +"""Integration tests for LXD bridge creation. + +(This is ported from +``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.) 
+""" +import pytest +import yaml + + +USER_DATA = """\ +#cloud-config +lxd: + init: + storage_backend: dir + bridge: + mode: new + name: lxdbr0 + ipv4_address: 10.100.100.1 + ipv4_netmask: 24 + ipv4_dhcp_first: 10.100.100.100 + ipv4_dhcp_last: 10.100.100.200 + ipv4_nat: true + domain: lxd +""" + + +@pytest.mark.no_container +@pytest.mark.user_data(USER_DATA) +class TestLxdBridge: + + @pytest.mark.parametrize("binary_name", ["lxc", "lxd"]) + def test_binaries_installed(self, class_client, binary_name): + """Check that the expected LXD binaries are installed""" + assert class_client.execute(["which", binary_name]).ok + + @pytest.mark.not_xenial + @pytest.mark.sru_2020_11 + def test_bridge(self, class_client): + """Check that the given bridge is configured""" + cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log") + assert "WARN" not in cloud_init_log + + # The bridge should exist + assert class_client.execute("ip addr show lxdbr0") + + raw_network_config = class_client.execute("lxc network show lxdbr0") + network_config = yaml.safe_load(raw_network_config) + assert "10.100.100.1/24" == network_config["config"]["ipv4.address"] diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 8a38ad8417b..28d741bc9e7 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -26,6 +26,7 @@ """ +@pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestPackageUpdateUpgradeInstall: diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py new file mode 100644 index 00000000000..844dccfacf5 --- /dev/null +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -0,0 +1,90 @@ +"""Integration test of the cc_power_state_change module. + +Test that the power state config options work as expected. +""" + +import time + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import verify_ordered_items_in_text + +USER_DATA = """\ +#cloud-config +power_state: + delay: {delay} + mode: {mode} + message: msg + timeout: {timeout} + condition: {condition} +""" + + +def _detect_reboot(instance: IntegrationInstance): + # We'll wait for instance up here, but we don't know if we're + # detecting the first boot or second boot, so we also check + # the logs to ensure we've booted twice. If the logs show we've + # only booted once, wait until we've booted twice + instance.instance.wait(raise_on_cloudinit_failure=False) + for _ in range(600): + try: + log = instance.read_from_file('/var/log/cloud-init.log') + boot_count = log.count("running 'init-local'") + if boot_count == 1: + instance.instance.wait(raise_on_cloudinit_failure=False) + elif boot_count > 1: + break + except Exception: + pass + time.sleep(1) + else: + raise Exception('Could not detect reboot') + + +def _can_connect(instance): + return instance.execute('true').ok + + +# This test is marked unstable because even though it should be able to +# run anywhere, I can only get it to run in an lxd container, and even then +# occasionally some timing issues will crop up. 
+@pytest.mark.unstable +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +@pytest.mark.lxd_container +class TestPowerChange: + @pytest.mark.parametrize('mode,delay,timeout,expected', [ + ('poweroff', 'now', '10', 'will execute: shutdown -P now msg'), + ('reboot', 'now', '0', 'will execute: shutdown -r now msg'), + ('halt', '+1', '0', 'will execute: shutdown -H +1 msg'), + ]) + def test_poweroff(self, session_cloud: IntegrationCloud, + mode, delay, timeout, expected): + with session_cloud.launch( + user_data=USER_DATA.format( + delay=delay, mode=mode, timeout=timeout, condition='true'), + wait=False + ) as instance: + if mode == 'reboot': + _detect_reboot(instance) + else: + instance.instance.wait_for_stop() + instance.instance.start(wait=True) + log = instance.read_from_file('/var/log/cloud-init.log') + assert _can_connect(instance) + lines_to_check = [ + 'Running module power-state-change', + expected, + "running 'init-local'", + 'config-power-state-change already ran', + ] + verify_ordered_items_in_text(lines_to_check, log) + + @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff', + timeout='0', condition='false')) + def test_poweroff_false_condition(self, client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert _can_connect(client) + assert 'Condition was false. Will not perform state change' in log diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py index b626f6b03ad..481edbaaa85 100644 --- a/tests/integration_tests/modules/test_snap.py +++ b/tests/integration_tests/modules/test_snap.py @@ -20,6 +20,7 @@ @pytest.mark.ci +@pytest.mark.ubuntu class TestSnap: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py index 45d37d6ca33..3db573b548d 100644 --- a/tests/integration_tests/modules/test_ssh_import_id.py +++ b/tests/integration_tests/modules/test_ssh_import_id.py @@ -3,6 +3,10 @@ This test specifies ssh keys to be imported by the ``ssh_import_id`` module and then checks that if the ssh keys were successfully imported. +TODO: +* This test assumes that SSH keys will be imported into the /home/ubuntu; this + will need modification to run on other OSes. 
+ (This is ported from ``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)""" @@ -18,6 +22,7 @@ @pytest.mark.ci +@pytest.mark.ubuntu class TestSshImportId: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py index 27d193c1815..6aae96aed2e 100644 --- a/tests/integration_tests/modules/test_ssh_keys_provided.py +++ b/tests/integration_tests/modules/test_ssh_keys_provided.py @@ -83,66 +83,78 @@ @pytest.mark.user_data(USER_DATA) class TestSshKeysProvided: - def test_ssh_dsa_keys_provided(self, class_client): - """Test dsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub") - assert ( - "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" - "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out - - """Test dsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key") - assert ( - "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" - "hOVAfzZ6+jklP") in out - - def test_ssh_rsa_keys_provided(self, class_client): - """Test rsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub") - assert ( - "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" - "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out - - """Test rsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key") - assert ( - "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" - "RQvLZpMRdywBm") in out - - def test_ssh_rsa_certificate_provided(self, class_client): - """Test rsa certificate was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub") - assert ( - "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" - "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out - - def test_ssh_certificate_updated_sshd_config(self, class_client): - """Test ssh certificate was added to /etc/ssh/sshd_config.""" - out = class_client.read_from_file("/etc/ssh/sshd_config").strip() - assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out - - def test_ssh_ecdsa_keys_provided(self, class_client): - """Test ecdsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub") - assert ( - "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" - "BBFsS5Tvky/IC/dXhE/afxxU") in out - - """Test ecdsa private key generated.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key") - assert ( - "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" - "5mpZqxgX4vcgb") in out - - def test_ssh_ed25519_keys_provided(self, class_client): - """Test ed25519 public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub") - assert ( - "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" - "G15dqjQ2XkNVOEnb5") in out - - """Test ed25519 private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key") - assert ( - "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" - "OhteXao0Nl5DVThJ2+Q") in out + @pytest.mark.parametrize( + "config_path,expected_out", + ( + ( + "/etc/ssh/ssh_host_dsa_key.pub", + ( + "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" + "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM" + ), + ), + ( + "/etc/ssh/ssh_host_dsa_key", + ( + "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" + "hOVAfzZ6+jklP" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key.pub", + ( + "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" + 
"LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key", + ( + "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" + "RQvLZpMRdywBm" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key-cert.pub", + ( + "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" + "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD" + ), + ), + ( + "/etc/ssh/sshd_config", + "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub", + ), + ( + "/etc/ssh/ssh_host_ecdsa_key.pub", + ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" + "BBFsS5Tvky/IC/dXhE/afxxU" + ), + ), + ( + "/etc/ssh/ssh_host_ecdsa_key", + ( + "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" + "5mpZqxgX4vcgb" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key.pub", + ( + "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" + "G15dqjQ2XkNVOEnb5" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key", + ( + "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" + "OhteXao0Nl5DVThJ2+Q" + ), + ), + ) + ) + def test_ssh_provided_keys(self, config_path, expected_out, class_client): + out = class_client.read_from_file(config_path).strip() + assert expected_out in out diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index 6a51f5a633a..ee08d87be16 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -2,6 +2,10 @@ This test specifies a number of users and groups via user-data, and confirms that they have been configured correctly in the system under test. + +TODO: +* This test assumes that the "ubuntu" user will be created when "default" is + specified; this will need modification to run on other OSes. """ import re @@ -41,6 +45,7 @@ @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestUsersGroups: + @pytest.mark.ubuntu @pytest.mark.parametrize( "getent_args,regex", [ diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py new file mode 100644 index 00000000000..a115bea7419 --- /dev/null +++ b/tests/integration_tests/test_upgrade.py @@ -0,0 +1,99 @@ +import logging +import pytest +import time +from pathlib import Path + +from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud +from tests.integration_tests.conftest import ( + get_validated_source, + session_start_time, +) + +log = logging.getLogger('integration_testing') + +USER_DATA = """\ +#cloud-config +hostname: SRU-worked +""" + + +def _output_to_compare(instance, file_path, netcfg_path): + commands = [ + 'hostname', + 'dpkg-query --show cloud-init', + 'cat /run/cloud-init/result.json', + # 'cloud-init init' helps us understand if our pickling upgrade paths + # have broken across re-constitution of a cached datasource. Some + # platforms invalidate their datasource cache on reboot, so we run + # it here to ensure we get a dirty run. 
+ 'cloud-init init' + 'grep Trace /var/log/cloud-init.log', + 'cloud-id' + 'cat {}'.format(netcfg_path), + 'systemd-analyze', + 'systemd-analyze blame', + 'cloud-init analyze show', + 'cloud-init analyze blame', + ] + with file_path.open('w') as f: + for command in commands: + f.write('===== {} ====='.format(command) + '\n') + f.write(instance.execute(command) + '\n') + + +def _restart(instance): + # work around pad.lv/1908287 + try: + instance.restart(raise_on_cloudinit_failure=True) + except OSError as e: + for _ in range(10): + time.sleep(5) + result = instance.execute('cloud-init status --wait --long') + if result.ok: + return + raise e + + +@pytest.mark.sru_2020_11 +def test_upgrade(session_cloud: IntegrationCloud): + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + pytest.skip("Install method '{}' not supported for this test".format( + source + )) + return # type checking doesn't understand that skip raises + + launch_kwargs = { + 'image_id': session_cloud._get_initial_image(), + } + + image = ImageSpecification.from_os_image() + + # Get the paths to write test logs + output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH) + output_dir.mkdir(parents=True, exist_ok=True) + base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format( + platform=session_cloud.settings.PLATFORM, + os=image.release, + time=session_start_time, + ) + before_path = output_dir / base_filename.format(stage='before') + after_path = output_dir / base_filename.format(stage='after') + + # Get the network cfg file + netcfg_path = '/dev/null' + if image.os == 'ubuntu': + netcfg_path = '/etc/netplan/50-cloud-init.yaml' + if image.release == 'xenial': + netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg' + + with session_cloud.launch( + launch_kwargs=launch_kwargs, user_data=USER_DATA, wait=True, + ) as instance: + _output_to_compare(instance, before_path, netcfg_path) + instance.install_new_cloud_init(source, take_snapshot=False) + instance.execute('hostname something-else') + _restart(instance) + _output_to_compare(instance, after_path, netcfg_path) + + log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e363c1f9739..dc615309d2c 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -159,6 +159,22 @@ def construct_valid_ovf_env(data=None, pubkeys=None, } } +SECONDARY_INTERFACE_NO_IP = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [] + } +} + IMDS_NETWORK_METADATA = { "interface": [ { @@ -1139,6 +1155,30 @@ def test_network_config_set_from_imds_route_metric_for_secondary_nic( dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds_for_secondary_nic_no_ip( + self, m_driver): + """If an IP address is empty then there should no config for it.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 
100}}}, + 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) + self.m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): """Datasource.availability returns IMDS platformFaultDomain.""" sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} @@ -1757,7 +1797,9 @@ def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): dsrc.get_data() dsrc.setup(True) ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key1']) + # Temporarily alter this test so that SSH public keys + # from IMDS are *not* going to be in use to fix a regression. + self.assertEqual(ssh_keys, []) self.assertEqual(m_parse_certificates.call_count, 0) @mock.patch(MOCKPATH + 'get_metadata_from_imds') diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index e74a0a08b32..6e3831ed851 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -47,12 +47,20 @@ class TestConfig(TestCase): def setUp(self): super(TestConfig, self).setUp() self.name = "ca-certs" - distro = self._fetch_distro('ubuntu') self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, distro, None) self.log = logging.getLogger("TestNoConfig") self.args = [] + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def _get_cloud(self, kind): + distro = self._fetch_distro(kind) + return cloud.Cloud(None, self.paths, None, distro, None) + + def _mock_init(self): self.mocks = ExitStack() self.addCleanup(self.mocks.close) @@ -64,11 +72,6 @@ def setUp(self): self.mock_remove = self.mocks.enter_context( mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - def test_no_trusted_list(self): """ Test that no certificates are written if the 'trusted' key is not @@ -76,71 +79,95 @@ def test_no_trusted_list(self): """ config = {"ca-certs": {}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_empty_trusted_list(self): """Test that no certificate are written if 'trusted' list is empty.""" config = {"ca-certs": {"trusted": []}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def 
test_single_trusted(self): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_multiple_trusted(self): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1', 'CERT2']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_remove_default_ca_certs(self): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) def test_no_remove_defaults_if_false(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": False}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_correct_order_for_remove_then_add(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + 
self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) class TestAddCaCerts(TestCase): @@ -152,12 +179,20 @@ def setUp(self): self.paths = helpers.Paths({ 'cloud_dir': tmpdir, }) + self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) def test_no_certs_in_list(self): """Test that no certificate are written if not provided.""" - with mock.patch.object(util, 'write_file') as mockobj: - cc_ca_certs.add_ca_certs([]) - self.assertEqual(mockobj.call_count, 0) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file') as mockobj: + cc_ca_certs.add_ca_certs(conf, []) + self.assertEqual(mockobj.call_count, 0) def test_single_cert_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -167,20 +202,28 @@ def test_single_cert_trailing_cr(self): ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 - cc_ca_certs.add_ca_certs([cert]) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) + + cc_ca_certs.add_ca_certs(conf, [cert]) + + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_no_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -189,24 +232,31 @@ def test_single_cert_no_trailing_cr(self): ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.add_ca_certs([cert]) + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - "cloud-init-ca-certs.crt"), - omode="wb")]) + cc_ca_certs.add_ca_certs(conf, [cert]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") 
+ mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode="wb")]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_to_empty_existing_ca_file(self): """Test adding a single certificate to the trusted CAs @@ -215,20 +265,22 @@ def test_single_cert_to_empty_existing_ca_file(self): expected = "cloud-init-ca-certs.crt\n" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file', autospec=True)) - mock_stat = mocks.enter_context( - mock.patch("cloudinit.config.cc_ca_certs.os.stat") - ) - mock_stat.return_value.st_size = 0 + self.m_stat.return_value.st_size = 0 - cc_ca_certs.add_ca_certs([cert]) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file', + autospec=True) as m_write: - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) + cc_ca_certs.add_ca_certs(conf, [cert]) + + m_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + m_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -236,32 +288,41 @@ def test_multiple_certs(self): expected_cert_file = "\n".join(certs) ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - cc_ca_certs.add_ca_certs(certs) + cc_ca_certs.add_ca_certs(conf, certs) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - expected_cert_file, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - "cloud-init-ca-certs.crt"), - omode='wb')]) + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + expected_cert_file, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode='wb')]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + mock_load.assert_called_once_with(conf['ca_cert_config']) class TestUpdateCaCerts(unittest.TestCase): def test_commands(self): - with mock.patch.object(subp, 'subp') as mockobj: - cc_ca_certs.update_ca_certs() - mockobj.assert_called_once_with( - ["update-ca-certificates"], capture=False) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(subp, 'subp') as mockobj: + cc_ca_certs.update_ca_certs(conf) + mockobj.assert_called_once_with( + conf['ca_cert_update_cmd'], 
capture=False) class TestRemoveDefaultCaCerts(TestCase): @@ -275,24 +336,31 @@ def setUp(self): }) def test_commands(self): - with ExitStack() as mocks: - mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp')) - - cc_ca_certs.remove_default_ca_certs('ubuntu') - - mock_delete.assert_has_calls([ - mock.call("/usr/share/ca-certificates/"), - mock.call("/etc/ssl/certs/")]) - - mock_write.assert_called_once_with( - "/etc/ca-certificates.conf", "", mode=0o644) - - mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates ca-certificates/trust_new_crts select no") + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_delete = mocks.enter_context( + mock.patch.object(util, 'delete_dir_contents')) + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_subp = mocks.enter_context( + mock.patch.object(subp, 'subp')) + + cc_ca_certs.remove_default_ca_certs(distro_name, conf) + + mock_delete.assert_has_calls([ + mock.call(conf['ca_cert_path']), + mock.call(conf['ca_cert_system_path'])]) + + if conf['ca_cert_config'] is not None: + mock_write.assert_called_once_with( + conf['ca_cert_config'], "", mode=0o644) + + if distro_name in ['debian', 'ubuntu']: + mock_subp.assert_called_once_with( + ('debconf-set-selections', '-'), + "ca-certificates \ +ca-certificates/trust_new_crts select no") # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 70453683d0c..bf0cdabb4d6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1365,10 +1365,11 @@ }, 'expected_sysconfig_rhel': { 'ifcfg-iface0': textwrap.dedent("""\ - BOOTPROTO=none + BOOTPROTO=dhcp DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_AUTOCONF=no IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 1e0c3ea43da..d6ca6d1bc6d 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -1,4 +1,5 @@ ader1990 +ajmyyra AlexBaranowski Aman306 aswinrajamannar @@ -6,6 +7,7 @@ beezly bipinbachhao BirknerAlex candlerb +cawamata dermotbradley dhensby eandersson @@ -21,6 +23,7 @@ manuelisimo marlluslustosa matthewruffell nishigori +olivierlemasle omBratteng onitake riedel @@ -31,3 +34,5 @@ TheRealFalcon tomponline tsanghan WebSpider +xiachen-rh +xnox diff --git a/tox.ini b/tox.ini index 022b918d6c5..7c71f0f6763 100644 --- a/tox.ini +++ b/tox.ini @@ -164,6 +164,8 @@ setenv = # TODO: s/--strict/--strict-markers/ once xenial support is dropped testpaths = cloudinit tests/unittests addopts = --strict +log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s +log_date_format = %Y-%m-%d %H:%M:%S markers = allow_subp_for: allow subp usage for the given commands (disable_subp_usage) allow_all_subp: allow all subp usage (disable_subp_usage) @@ -173,9 +175,14 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform + lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_vm: test will only run in LXD VM + not_xenial: test cannot run on the xenial release + not_bionic: test cannot run on the bionic release no_container: test cannot run in 
a container
     user_data: the user data to be passed to the test instance
     instance_name: the name to be used for the test instance
     sru_2020_11: test is part of the 2020/11 SRU verification
+    ubuntu: test will only run on Ubuntu
+    unstable: skip this test because it is flaky
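
Taken together, the new conftest fixtures and the markers registered in tox.ini
let an integration test declare its user data and its platform/OS constraints
declaratively. A minimal sketch of such a test follows; the file contents, user
data and assertion are illustrative only:

import pytest

from tests.integration_tests.instances import IntegrationInstance

USER_DATA = """\
#cloud-config
runcmd:
  - echo 'marker example' > /var/tmp/example.txt
"""


@pytest.mark.ubuntu       # skipped on non-Ubuntu images by pytest_runtest_setup
@pytest.mark.not_xenial   # skipped when the image release is xenial
@pytest.mark.user_data(USER_DATA)
def test_example(client: IntegrationInstance):
    # The function-scoped ``client`` fixture launches an instance with
    # USER_DATA applied, yields it to the test, then collects logs according
    # to COLLECT_LOGS before the instance is cleaned up.
    assert 'marker example' in client.read_from_file('/var/tmp/example.txt')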