diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 4c123ac668526d..5e961011ee4e4e 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -105,6 +105,23 @@ stages: - 3 - 4 - 5 + - template: templates/matrix.yml # context/controller (ansible-test container management) + parameters: + targets: + - name: Alpine 3.16 + test: alpine/3.16 + - name: Fedora 36 + test: fedora/36 + - name: RHEL 8.4 + test: rhel/8.4 + - name: RHEL 9.0 + test: rhel/9.0 + - name: Ubuntu 20.04 + test: ubuntu/20.04 + - name: Ubuntu 22.04 + test: ubuntu/22.04 + groups: + - 6 - stage: Docker dependsOn: [] jobs: diff --git a/changelogs/fragments/77472-ansible-test-network-disconnect-warning.yml b/changelogs/fragments/77472-ansible-test-network-disconnect-warning.yml new file mode 100644 index 00000000000000..a13026acadf581 --- /dev/null +++ b/changelogs/fragments/77472-ansible-test-network-disconnect-warning.yml @@ -0,0 +1,3 @@ +bugfixes: +- ansible-test - Don't fail if network cannot be disconnected + (https://github.com/ansible/ansible/pull/77472) diff --git a/changelogs/fragments/ansible-test-code-cleanup.yml b/changelogs/fragments/ansible-test-code-cleanup.yml index 69ce6c60500519..906ebbc8d64198 100644 --- a/changelogs/fragments/ansible-test-code-cleanup.yml +++ b/changelogs/fragments/ansible-test-code-cleanup.yml @@ -1,2 +1,4 @@ minor_changes: - ansible-test - Minor code cleanup. + - ansible-test - Miscellaneous code cleanup and type hint fixes. + - ansible-test - Remove unused Python 2.x compatibility code. diff --git a/changelogs/fragments/ansible-test-collection-identifier.yml b/changelogs/fragments/ansible-test-collection-identifier.yml new file mode 100644 index 00000000000000..5b520e26a61357 --- /dev/null +++ b/changelogs/fragments/ansible-test-collection-identifier.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Stop early with an error if the current working directory contains an invalid collection namespace or name. diff --git a/changelogs/fragments/ansible-test-container-management.yml b/changelogs/fragments/ansible-test-container-management.yml new file mode 100644 index 00000000000000..0d0c77ed8c3b58 --- /dev/null +++ b/changelogs/fragments/ansible-test-container-management.yml @@ -0,0 +1,70 @@ +major_changes: + - ansible-test - Docker and Podman are now supported on hosts with cgroup v2 unified. + Previously only cgroup v1 and cgroup v2 hybrid were supported. + - ansible-test - Docker Desktop on WSL2 is now supported (additional configuration required). + - ansible-test - Podman on WSL2 is now supported. + - ansible-test - Podman now works on container hosts without systemd. + Previously only some containers worked, while others required rootfull or rootless Podman, + but would not work with both. Some containers did not work at all. + - ansible-test - When additional cgroup setup is required on the container host, this will be automatically detected. + Instructions on how to configure the host will be provided in the error message shown. +minor_changes: + - ansible-test - When using Podman, ansible-test will detect if the loginuid used in containers is incorrect. + When this occurs a warning is displayed and the container is run with the AUDIT_CONTROL capability. + Previously containers would fail under this situation, with no useful warnings or errors given. + - ansible-test - Failure to connect to a container over SSH now results in a clear error. 
+ Previously tests would be attempted even after initial connection attempts failed. + - ansible-test - Warnings are now shown when using containers that were built with VOLUME instructions. + - ansible-test - Unit tests now support network disconnect by default when running under Podman. + Previously this feature only worked by default under Docker. + - ansible-test - Additional log details are shown when containers fail to start or SSH connections to containers fail. + - ansible-test - Containers included with ansible-test no longer disable seccomp by default. + - ansible-test - A new ``cgroup`` option is available when running custom containers. + This option can be used to indicate that a container requires cgroup v1 or that it does not use cgroup. + The default behavior assumes the container works with cgroup v2 (as well as v1). + - ansible-test - A new ``audit`` option is available when running custom containers. + This option can be used to indicate whether a container requires the AUDIT_WRITE capability. + The default is ``required``, which most containers will need when using Podman. + If necessary, the ``none`` option can be used to opt out of the capability. + This has no effect on Docker, which always provides the capability. + - ansible-test - More details are provided about an instance when provisioning fails. + - ansible-test - Connection failures to remote provisioned hosts now show failure details as a warning. + - ansible-test - When setting the max open files for containers, the container host's limit will be checked. + If the host limit is lower than the preferred value, it will be used and a warning will be shown. + - ansible-test - Use ``stop --time 0`` followed by ``rm`` to remove ephemeral containers instead of ``rm -f``. + This speeds up teardown of ephemeral containers. + - ansible-test - Reduce the polling limit for SSHD startup in containers from 60 retries to 10. + The one second delay between retries remains in place. + - ansible-test - Integration tests can be excluded from retries triggered by the ``--retry-on-error`` option by + adding the ``retry/never`` alias. This is useful for tests that cannot pass on a retry or are too + slow to make retries useful. + - ansible-test - The ``ansible-test env`` command now detects and reports the container ID if running in a container. + - ansible-test - SSH connections from OpenSSH 8.8+ to CentOS 6 containers now work without additional configuration. + However, clients older than OpenSSH 7.0 can no longer connect to CentOS 6 containers as a result. + The container must have ``centos6`` in the image name for this work-around to be applied. + - ansible-test - SSH shell connections from OpenSSH 8.8+ to ansible-test provisioned network instances now work without additional configuration. + However, clients older than OpenSSH 7.0 can no longer open shell sessions for ansible-test provisioned network instances as a result. +bugfixes: + - ansible-test - Multiple containers now work under Podman without specifying the ``--docker-network`` option. + - ansible-test - Prevent concurrent / repeat pulls of the same container image. + - ansible-test - Prevent concurrent / repeat inspections of the same container image. + - ansible-test - Prevent concurrent execution of cached methods. + - ansible-test - Handle server errors when executing the ``docker info`` command. + - ansible-test - Show the exception type when reporting errors during instance provisioning. 
+ - ansible-test - Pass the ``XDG_RUNTIME_DIR`` environment variable through to container commands. + - ansible-test - Connection attempts to managed remote instances no longer abort on ``Permission denied`` errors. + - ansible-test - Detection for running in a Podman or Docker container has been fixed to detect more scenarios. + The new detection relies on ``/proc/self/mountinfo`` instead of ``/proc/self/cpuset``. + Detection now works with custom cgroups and private cgroup namespaces. + - ansible-test - Avoid using ``exec`` after container startup when possible. + This improves container startup performance and avoids intermittent startup issues with some old containers. +known_issues: + - ansible-test - Using Docker on systems with SELinux may require setting SELinux to permissive mode. + Podman should work with SELinux in enforcing mode. + - ansible-test - Additional configuration may be required for certain container host and container combinations. + Further details are available in the testing documentation. + - ansible-test - Systems with Podman networking issues may be unable to run containers, when previously the issue + went unreported. Correct the networking issues to continue using ``ansible-test`` with Podman. + - ansible-test - Custom containers with ``VOLUME`` instructions may be unable to start, when previously the containers + started correctly. Remove the ``VOLUME`` instructions to resolve the issue. Containers with this + condition will cause ``ansible-test`` to emit a warning. diff --git a/changelogs/fragments/ansible-test-container-tmpfs.yml b/changelogs/fragments/ansible-test-container-tmpfs.yml new file mode 100644 index 00000000000000..678cd0770dfa5e --- /dev/null +++ b/changelogs/fragments/ansible-test-container-tmpfs.yml @@ -0,0 +1,5 @@ +bugfixes: + - ansible-test - Test containers are now run with the ``--tmpfs`` option for ``/tmp``, ``/run`` and ``/run/lock``. + This allows use of containers built without the ``VOLUME`` instruction. + Additionally, containers with those volumes defined no longer create anonymous volumes for them. + This avoids leaving behind volumes on the container host after the container is stopped and deleted. diff --git a/changelogs/fragments/ansible-test-content-config.yml b/changelogs/fragments/ansible-test-content-config.yml new file mode 100644 index 00000000000000..5eff0c34c48dd1 --- /dev/null +++ b/changelogs/fragments/ansible-test-content-config.yml @@ -0,0 +1,3 @@ +bugfixes: + - "ansible-test - Test configuration for collections is now parsed only once, prior to delegation. + Fixes issue: https://github.com/ansible/ansible/issues/78334" diff --git a/changelogs/fragments/ansible-test-docker-ulimit.yml b/changelogs/fragments/ansible-test-docker-ulimit.yml new file mode 100644 index 00000000000000..9469977934a1be --- /dev/null +++ b/changelogs/fragments/ansible-test-docker-ulimit.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Change the maximum number of open files in a test container from the default to ``10240``. diff --git a/changelogs/fragments/ansible-test-fix-type-hints.yml b/changelogs/fragments/ansible-test-fix-type-hints.yml new file mode 100644 index 00000000000000..630504db368a7d --- /dev/null +++ b/changelogs/fragments/ansible-test-fix-type-hints.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix type hints. 
diff --git a/changelogs/fragments/ansible-test-fix-typo-validate-modules.yaml b/changelogs/fragments/ansible-test-fix-typo-validate-modules.yaml new file mode 100644 index 00000000000000..a3cac40f7aa051 --- /dev/null +++ b/changelogs/fragments/ansible-test-fix-typo-validate-modules.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - fix a typo in validate-modules. diff --git a/changelogs/fragments/ansible-test-fix-warning-msg.yml b/changelogs/fragments/ansible-test-fix-warning-msg.yml new file mode 100644 index 00000000000000..b6b6d4838610c7 --- /dev/null +++ b/changelogs/fragments/ansible-test-fix-warning-msg.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - fix warning message about failing to run an image to include the image name diff --git a/changelogs/fragments/ansible-test-generalize-become.yml b/changelogs/fragments/ansible-test-generalize-become.yml new file mode 100644 index 00000000000000..1831c052286ec1 --- /dev/null +++ b/changelogs/fragments/ansible-test-generalize-become.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Become support for remote instance provisioning is no longer tied to a fixed list of platforms. diff --git a/changelogs/fragments/ansible-test-help-cwd.yml b/changelogs/fragments/ansible-test-help-cwd.yml new file mode 100644 index 00000000000000..ea2c19ce41c3eb --- /dev/null +++ b/changelogs/fragments/ansible-test-help-cwd.yml @@ -0,0 +1,5 @@ +minor_changes: + - ansible-test - The ``--help`` option is now available when an unsupported cwd is in use. + - ansible-test - The ``--help`` output now shows the same instructions about cwd as would be shown in error messages if the cwd is unsupported. + - ansible-test - Add ``--version`` support to show the ansible-core version. + - ansible-test - The explanation about cwd usage has been improved to explain more clearly what is required. diff --git a/changelogs/fragments/ansible-test-integration-targets-filter.yml b/changelogs/fragments/ansible-test-integration-targets-filter.yml new file mode 100644 index 00000000000000..fe907b85009e87 --- /dev/null +++ b/changelogs/fragments/ansible-test-integration-targets-filter.yml @@ -0,0 +1,4 @@ +bugfixes: + - ansible-test - Allow disabled, unsupported, unstable and destructive integration test targets to be selected using their respective prefixes. + - ansible-test - Allow unstable tests to run when targeted changes are made and the ``--allow-unstable-changed`` option is specified + (resolves https://github.com/ansible/ansible/issues/74213). diff --git a/changelogs/fragments/ansible-test-maxfd.yaml b/changelogs/fragments/ansible-test-maxfd.yaml new file mode 100644 index 00000000000000..c0fd4fcd3ecca9 --- /dev/null +++ b/changelogs/fragments/ansible-test-maxfd.yaml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Remove obsolete ``MAXFD`` display. diff --git a/changelogs/fragments/ansible-test-more-remotes.yml b/changelogs/fragments/ansible-test-more-remotes.yml new file mode 100644 index 00000000000000..7eb1615011e708 --- /dev/null +++ b/changelogs/fragments/ansible-test-more-remotes.yml @@ -0,0 +1,5 @@ +minor_changes: + - ansible-test - Add support for provisioning remotes which require ``doas`` for become. + - ansible-test - Add support for provisioning Ubuntu 20.04 remote instances. + - ansible-test - Add support for provisioning Alpine 3.16 remote instances. + - ansible-test - Add support for provisioning Fedora 36 remote instances. 
diff --git a/changelogs/fragments/ansible-test-multi-arch-remotes.yml b/changelogs/fragments/ansible-test-multi-arch-remotes.yml new file mode 100644 index 00000000000000..a11eef60a24499 --- /dev/null +++ b/changelogs/fragments/ansible-test-multi-arch-remotes.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Add support for multi-arch remotes. diff --git a/changelogs/fragments/ansible-test-paramiko-constraint.yaml b/changelogs/fragments/ansible-test-paramiko-constraint.yaml new file mode 100644 index 00000000000000..843814749d5ce2 --- /dev/null +++ b/changelogs/fragments/ansible-test-paramiko-constraint.yaml @@ -0,0 +1,3 @@ +bugfixes: + - ansible-test - Limit ``paramiko`` installation to versions before 2.9.0. + This is required to maintain support for systems which do not support RSA SHA-2 algorithms. diff --git a/changelogs/fragments/ansible-test-plugin-loading.yml b/changelogs/fragments/ansible-test-plugin-loading.yml new file mode 100644 index 00000000000000..64268076a29ce4 --- /dev/null +++ b/changelogs/fragments/ansible-test-plugin-loading.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix plugin loading. diff --git a/changelogs/fragments/ansible-test-podman-create-retry.yml b/changelogs/fragments/ansible-test-podman-create-retry.yml new file mode 100644 index 00000000000000..7416e89f795497 --- /dev/null +++ b/changelogs/fragments/ansible-test-podman-create-retry.yml @@ -0,0 +1,3 @@ +bugfixes: + - ansible-test - Always remove containers after failing to create/run them. + This avoids leaving behind created containers when using podman. diff --git a/changelogs/fragments/ansible-test-podman-remote.yaml b/changelogs/fragments/ansible-test-podman-remote.yaml new file mode 100644 index 00000000000000..7eb38e110e78f3 --- /dev/null +++ b/changelogs/fragments/ansible-test-podman-remote.yaml @@ -0,0 +1,3 @@ +minor_changes: +- ansible-test - Add support for running container tests with ``podman remote`` + (https://github.com/ansible/ansible/pull/75753) diff --git a/changelogs/fragments/ansible-test-podman-support-containers.yaml b/changelogs/fragments/ansible-test-podman-support-containers.yaml new file mode 100644 index 00000000000000..7838c9243aed9c --- /dev/null +++ b/changelogs/fragments/ansible-test-podman-support-containers.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - fixed support container failures (eg http-test-container) under podman diff --git a/changelogs/fragments/ansible-test-pypi-proxy-fix.yml b/changelogs/fragments/ansible-test-pypi-proxy-fix.yml new file mode 100644 index 00000000000000..5e5e52b150448e --- /dev/null +++ b/changelogs/fragments/ansible-test-pypi-proxy-fix.yml @@ -0,0 +1,4 @@ +bugfixes: + - ansible-test - Perform PyPI proxy configuration after instances are ready and bootstrapping has been completed. + Only target instances are affected, as controller instances were already handled this way. + This avoids proxy configuration errors when target instances are not yet ready for use. diff --git a/changelogs/fragments/ansible-test-remote-acl.yml b/changelogs/fragments/ansible-test-remote-acl.yml new file mode 100644 index 00000000000000..79ff7e514889de --- /dev/null +++ b/changelogs/fragments/ansible-test-remote-acl.yml @@ -0,0 +1,5 @@ +minor_changes: + - ansible-test - Remote FreeBSD instances now have ACLs enabled on the root filesystem. + - ansible-test - Remote Fedora instances now have the ``acl`` package installed. + - ansible-test - Remote Ubuntu instances now have the ``acl`` package installed. 
+ - ansible-test - Remote Alpine instances now have the ``acl`` package installed. diff --git a/changelogs/fragments/ansible-test-remote-become.yml b/changelogs/fragments/ansible-test-remote-become.yml new file mode 100644 index 00000000000000..031cac34ba6a13 --- /dev/null +++ b/changelogs/fragments/ansible-test-remote-become.yml @@ -0,0 +1,3 @@ +minor_changes: + - ansible-test - Alpine remotes now use ``sudo`` for tests, using ``doas`` only for bootstrapping. + - ansible-test - FreeBSD remotes now use ``sudo`` for tests, using ``su`` only for bootstrapping. diff --git a/changelogs/fragments/ansible-test-remote-completion-validation.yml b/changelogs/fragments/ansible-test-remote-completion-validation.yml new file mode 100644 index 00000000000000..640ec2faf81c5f --- /dev/null +++ b/changelogs/fragments/ansible-test-remote-completion-validation.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix internal validation of remote completion configuration. diff --git a/changelogs/fragments/ansible-test-remove-aix-provisioning.yaml b/changelogs/fragments/ansible-test-remove-aix-provisioning.yaml new file mode 100644 index 00000000000000..7a058d4f0c617c --- /dev/null +++ b/changelogs/fragments/ansible-test-remove-aix-provisioning.yaml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Remove support for provisioning remote AIX instances. diff --git a/changelogs/fragments/ansible-test-self-change-classification.yml b/changelogs/fragments/ansible-test-self-change-classification.yml new file mode 100644 index 00000000000000..70970b4540e1ff --- /dev/null +++ b/changelogs/fragments/ansible-test-self-change-classification.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix change detection for ansible-test's own integration tests. diff --git a/changelogs/fragments/ansible-test-shell-features.yml b/changelogs/fragments/ansible-test-shell-features.yml new file mode 100644 index 00000000000000..dbe6890b72ffb2 --- /dev/null +++ b/changelogs/fragments/ansible-test-shell-features.yml @@ -0,0 +1,7 @@ +minor_changes: + - ansible-test - Add support for running non-interactive commands with ``ansible-test shell``. + - ansible-test - Add support for exporting inventory with ``ansible-test shell --export {path}``. + - ansible-test - The ``shell`` command can be used outside a collection if no controller delegation is required. + - ansible-test - Improve consistency of output messages by using stdout or stderr for most output, but not both. +bugfixes: + - ansible-test - Sanity test output with the ``--lint`` option is no longer mixed in with bootstrapping output. diff --git a/changelogs/fragments/ansible-test-subprocess-isolation.yml b/changelogs/fragments/ansible-test-subprocess-isolation.yml new file mode 100644 index 00000000000000..3be259d60899f2 --- /dev/null +++ b/changelogs/fragments/ansible-test-subprocess-isolation.yml @@ -0,0 +1,10 @@ +bugfixes: + - ansible-test - Subprocesses are now isolated from the stdin, stdout and stderr of ansible-test. + This avoids issues with subprocesses tampering with the file descriptors, such as SSH making them non-blocking. + As a result of this change, subprocess output from unit and integration tests on stderr now goes to stdout. + - ansible-test - Subprocesses no longer have access to the TTY ansible-test is connected to, if any. + This maintains consistent behavior between local testing and CI systems, which typically do not provide a TTY. + Tests which require a TTY should use pexpect or another mechanism to create a PTY. 
+minor_changes: + - ansible-test - Blocking mode is now enforced for stdin, stdout and stderr. + If any of these are non-blocking then ansible-test will exit during startup with an error. diff --git a/changelogs/fragments/ansible-test-target-options.yml b/changelogs/fragments/ansible-test-target-options.yml new file mode 100644 index 00000000000000..716a5dca4195b8 --- /dev/null +++ b/changelogs/fragments/ansible-test-target-options.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Prevent ``--target-`` prefixed options for the ``shell`` command from being combined with legacy environment options. diff --git a/changelogs/fragments/ansible-test-tty-output-handling.yml b/changelogs/fragments/ansible-test-tty-output-handling.yml new file mode 100644 index 00000000000000..58031dcd50a81a --- /dev/null +++ b/changelogs/fragments/ansible-test-tty-output-handling.yml @@ -0,0 +1,7 @@ +bugfixes: + - ansible-test - The ``shell`` command no longer requests a TTY when using delegation unless an interactive shell is being used. + An interactive shell is the default behavior when no command is given to pass to the shell. + - ansible-test - The ``shell`` command no longer redirects all output to stdout when running a provided command. + Any command output written to stderr will be mixed with the stderr output from ansible-test. + - ansible-test - Delegation for commands which generate output for programmatic consumption no longer redirect all output to stdout. + The affected commands and options are ``shell``, ``sanity --lint``, ``sanity --list-tests``, ``integration --list-targets``, ``coverage analyze`` diff --git a/changelogs/fragments/ansible-test-ubuntu-bootstrap-fix.yml b/changelogs/fragments/ansible-test-ubuntu-bootstrap-fix.yml new file mode 100644 index 00000000000000..92666bed73afa4 --- /dev/null +++ b/changelogs/fragments/ansible-test-ubuntu-bootstrap-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix bootstrapping of Python 3.9 on Ubuntu 20.04 remotes. diff --git a/changelogs/fragments/ansible-test-ubuntu-remote.yml b/changelogs/fragments/ansible-test-ubuntu-remote.yml new file mode 100644 index 00000000000000..303f8c1eb52068 --- /dev/null +++ b/changelogs/fragments/ansible-test-ubuntu-remote.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Add support for Ubuntu VMs using the ``--remote`` option. diff --git a/changelogs/fragments/ansible-test-verify-executables.yml b/changelogs/fragments/ansible-test-verify-executables.yml new file mode 100644 index 00000000000000..a1eff95d095ab4 --- /dev/null +++ b/changelogs/fragments/ansible-test-verify-executables.yml @@ -0,0 +1,6 @@ +bugfixes: + - ansible-test - Temporary executables are now verified as executable after creation. + Without this check, path injected scripts may not be found, + typically on systems with ``/tmp`` mounted using the "noexec" option. + This can manifest as a missing Python interpreter, or use of the wrong Python interpreter, as well + as other error conditions. diff --git a/changelogs/fragments/ansible-test-windows-default.yaml b/changelogs/fragments/ansible-test-windows-default.yaml new file mode 100644 index 00000000000000..44979f07f98a52 --- /dev/null +++ b/changelogs/fragments/ansible-test-windows-default.yaml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Add default entry for Windows remotes to be used with unknown versions. 
diff --git a/changelogs/fragments/ansible_test.yml b/changelogs/fragments/ansible_test.yml new file mode 100644 index 00000000000000..ca4553f1826bf1 --- /dev/null +++ b/changelogs/fragments/ansible_test.yml @@ -0,0 +1,3 @@ +--- +minor_changes: +- ansible-test - handle JSON decode error gracefully in podman environment. diff --git a/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst index 9e143b3f10970d..f40b4913fde516 100644 --- a/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst +++ b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst @@ -96,6 +96,7 @@ There are several other aliases available as well: - ``destructive`` - Requires ``--allow-destructive`` to run without ``--docker`` or ``--remote``. - ``hidden`` - Target is ignored. Usable as a dependency. Automatic for ``setup_`` and ``prepare_`` prefixed targets. +- ``retry/never`` - Target is excluded from retries enabled by the ``--retry-on-error`` option. Unstable -------- diff --git a/docs/docsite/rst/dev_guide/testing_running_locally.rst b/docs/docsite/rst/dev_guide/testing_running_locally.rst index dcf7e6d9f7e397..40910a87ec1fa7 100644 --- a/docs/docsite/rst/dev_guide/testing_running_locally.rst +++ b/docs/docsite/rst/dev_guide/testing_running_locally.rst @@ -2,44 +2,329 @@ .. _testing_running_locally: -*************** -Testing Ansible -*************** +******************************* +Testing Ansible and Collections +******************************* -This document describes how to: - -* Run tests locally using ``ansible-test`` -* Extend +This document describes how to run tests using ``ansible-test``. .. contents:: :local: -Requirements -============ +Setup +===== -There are no special requirements for running ``ansible-test`` on Python 2.7 or later. -The ``argparse`` package is required for Python 2.6. -The requirements for each ``ansible-test`` command are covered later. +Before running ``ansible-test``, set up your environment for :ref:`Testing an Ansible Collection` or +:ref:`Testing ansible-core`, depending on which scenario applies to you. +.. warning:: -Test Environments -================= + If you use ``git`` for version control, make sure the files you are working with are not ignored by ``git``. + If they are, ``ansible-test`` will ignore them as well. + +Testing an Ansible Collection +----------------------------- + +If you are testing an Ansible Collection, you need a copy of the collection, preferably a git clone. +For example, to work with the ``community.windows`` collection, follow these steps: + +1. Clone the collection you want to test into a valid collection root: + + .. code-block:: shell + + git clone https://github.com/ansible-collections/community.windows ~/dev/ansible_collections/community/windows + + .. important:: + + The path must end with ``/ansible_collections/{collection_namespace}/{collection_name}`` where + ``{collection_namespace}`` is the namespace of the collection and ``{collection_name}`` is the collection name. + +2. Clone any collections on which the collection depends: + + .. code-block:: shell + + git clone https://github.com/ansible-collections/ansible.windows ~/dev/ansible_collections/ansible/windows + + .. important:: + + If your collection has any dependencies on other collections, they must be in the same collection root, since + ``ansible-test`` will not use your configured collection roots (or other Ansible configuration). + + .. 
note:: + + See the collection's ``galaxy.yml`` for a list of possible dependencies. + +3. Switch to the directory where the collection to test resides: + + .. code-block:: shell + + cd ~/dev/ansible_collections/community/windows + +Testing ``ansible-core`` +------------------------ + +If you are testing ``ansible-core`` itself, you need a copy of the ``ansible-core`` source code, preferably a git clone. +Having an installed copy of ``ansible-core`` is neither sufficient nor required. +For example, to work with the ``ansible-core`` source cloned from GitHub, follow these steps: + +1. Clone the ``ansible-core`` repository: + + .. code-block:: shell + + git clone https://github.com/ansible/ansible ~/dev/ansible + +2. Switch to the directory where the ``ansible-core`` source resides: + + .. code-block:: shell + + cd ~/dev/ansible + +3. Add ``ansible-core`` programs to your ``PATH``: + + .. code-block:: shell + + source hacking/env-setup + + .. note:: + + You can skip this step if you only need to run ``ansible-test``, and not other ``ansible-core`` programs. + In that case, simply run ``bin/ansible-test`` from the root of the ``ansible-core`` source. + + .. caution:: + + If you have an installed version of ``ansible-core`` and are trying to run ``ansible-test`` from your ``PATH``, + make sure the program found by your shell is the one from the ``ansible-core`` source: + + .. code-block:: shell + + which ansible-test + +Commands +======== + +The most commonly used test commands are: + +* ``ansible-test sanity`` - Run sanity tests (mostly linters and static analysis). +* ``ansible-test integration`` - Run integration tests. +* ``ansible-test units`` - Run unit tests. + +Run ``ansible-test --help`` to see a complete list of available commands. + +.. note:: + + For detailed help on a specific command, add the ``--help`` option after the command. + +Environments +============ + +Most ``ansible-test`` commands support running in one or more isolated test environments to simplify testing. + +Containers +---------- + +Containers are recommended for running sanity, unit and integration tests, since they provide consistent environments. Unit tests will be run with network isolation, which avoids unintentional dependencies on network resources. + +The ``--docker`` option runs tests in a container using either Docker or Podman. + +.. note:: + + If both Docker and Podman are installed, Docker will be used. + To override this, set the environment variable ``ANSIBLE_TEST_PREFER_PODMAN`` to any non-empty value. + +Choosing a container +^^^^^^^^^^^^^^^^^^^^ + +Without an additional argument, the ``--docker`` option uses the ``default`` container. +To use another container, specify it immediately after the ``--docker`` option. + +.. note:: + + The ``default`` container is recommended for all sanity and unit tests. + +To see the list of supported containers, use the ``--help`` option with the ``ansible-test`` command you want to use. + +.. note:: + + The list of available containers is dependent on the ``ansible-test`` command you are using. + +You can also specify your own container. +When doing so, you will need to indicate the Python version in the container with the ``--python`` option. + +Custom containers +""""""""""""""""" + +When building custom containers, keep in mind the following requirements: + +* The ``USER`` should be ``root``. +* Use an ``init`` process, such as ``systemd``. +* Include ``sshd`` and accept connections on the default port of ``22``. +* Include a POSIX compatible ``sh`` shell which can be found on ``PATH``. +* Include a ``sleep`` utility which runs as a subprocess. +* Include a supported version of Python. +* Avoid using the ``VOLUME`` statement.
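+
+After building an image that satisfies these requirements, pass it to the ``--docker`` option along with the
+Python version available inside the container. This invocation is only a sketch; the image name is a
+hypothetical placeholder:
+
+.. code-block:: shell
+
+    ansible-test units --docker quay.io/example/custom-test-container:latest --python 3.10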
+ +Docker and SELinux +^^^^^^^^^^^^^^^^^^ + +Using Docker on a host with SELinux may require setting the system in permissive mode. +Consider using Podman instead. + +Docker Desktop with WSL2 +^^^^^^^^^^^^^^^^^^^^^^^^ + +These instructions explain how to use ``ansible-test`` with WSL2 and Docker Desktop *without* ``systemd`` support. -Remote ------ +.. note:: -The ``--remote`` option runs tests in a cloud hosted environment. -An API key is required to use this feature. + If your WSL2 environment includes ``systemd`` support, these steps are not required. - Recommended for integration tests. +Configuration requirements -See the `list of supported platforms and versions `_ for additional details. +"""""""""""""""""""""""""" -Environment Variables ---------------------- +1. Open Docker Desktop and go to the **Settings** screen. +2. On the **General** tab: + + a. Uncheck the **Start Docker Desktop when you log in** checkbox. + b. Check the **Use the WSL 2 based engine** checkbox. + +3. On the **Resources** tab under the **WSL Integration** section: + + a. Enable distros you want to use under the **Enable integration with additional distros** section. + +4. Click **Apply and restart** if changes were made. + +Setup instructions +"""""""""""""""""" + +.. note:: + + If all WSL instances have been stopped, these changes will need to be re-applied. + +1. Verify Docker Desktop is properly configured (see :ref:`Configuration requirements`). +2. Quit Docker Desktop if it is running: + + a. Right click the **Docker Desktop** taskbar icon. + b. Click the **Quit Docker Desktop** option. + +3. Stop any running WSL instances with the command: + + .. code-block:: shell + + wsl --shutdown + +4. Verify all WSL instances have stopped with the command: + + .. code-block:: shell + + wsl -l -v + +5. Start a WSL instance and perform the following steps as ``root``: + + a. Verify the ``systemd`` subsystem is not registered: + + a. Check for the ``systemd`` cgroup hierarchy with the following command: + + .. code-block:: shell + + grep systemd /proc/self/cgroup + + b. If any matches are found, re-check the :ref:`Configuration requirements` and follow the + :ref:`Setup instructions` again. + + b. Mount the ``systemd`` cgroup hierarchy with the following commands: + + .. code-block:: shell + + mkdir /sys/fs/cgroup/systemd + mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + +6. Start Docker Desktop. + +You should now be able to use ``ansible-test`` with the ``--docker`` option. + +Linux cgroup configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + + These changes will need to be re-applied each time the container host is booted. + +For certain container hosts and container combinations, additional setup on the container host may be required. +In these situations ``ansible-test`` will report an error and provide additional instructions to run as ``root``: + +.. code-block:: shell + + mkdir /sys/fs/cgroup/systemd + mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + +If you are using rootless Podman, an additional command must be run, also as ``root``. +Make sure to substitute your user and group for ``{user}`` and ``{group}`` respectively: + +.. code-block:: shell + + chown -R {user}:{group} /sys/fs/cgroup/systemd
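+
+To confirm the hierarchy is mounted, you can inspect the mount table (an optional, illustrative check):
+
+.. code-block:: shell
+
+    mount -t cgroup | grep systemd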
+ +Podman +"""""" + +When using Podman, you may need to stop existing Podman processes after following the :ref:`Linux cgroup configuration` +instructions. Otherwise Podman may be unable to see the new mount point. + +You can check to see if Podman is running by looking for ``podman`` and ``catatonit`` processes. + +Remote virtual machines +----------------------- + +Remote virtual machines are recommended for running integration tests not suitable for execution in containers. + +The ``--remote`` option runs tests in a cloud hosted ephemeral virtual machine. + +.. note:: + + An API key is required to use this feature, unless running under an approved Azure Pipelines organization. + +To see the list of supported systems, use the ``--help`` option with the ``ansible-test`` command you want to use. + +.. note:: + + The list of available systems is dependent on the ``ansible-test`` command you are using. + +Python virtual environments +--------------------------- + +Python virtual environments provide a simple way to achieve isolation from the system and user Python environments. +They are recommended for unit and integration tests when the ``--docker`` and ``--remote`` options cannot be used. + +The ``--venv`` option runs tests in a virtual environment managed by ``ansible-test``. +Requirements are automatically installed before tests are run. + +Composite environment arguments +------------------------------- + +The environment arguments covered in this document are sufficient for most use cases. +However, some scenarios may require the additional flexibility offered by composite environment arguments. + +The ``--controller`` and ``--target`` options are alternatives to the ``--docker``, ``--remote`` and ``--venv`` options. + +.. note:: + + When using the ``shell`` command, the ``--target`` option is replaced by three platform specific options. + +Add the ``--help`` option to your ``ansible-test`` command to learn more about the composite environment arguments. + +Additional requirements +======================= + +Some ``ansible-test`` commands have additional requirements. +You can use the ``--requirements`` option to automatically install them. + +.. note:: + + When using a test environment managed by ``ansible-test``, the ``--requirements`` option is usually unnecessary. + +Environment variables +===================== When using environment variables to manipulate tests there are some limitations to keep in mind. Environment variables are: @@ -51,16 +336,15 @@ When using environment variables to manipulate tests there some limitations to k and the tests executed. This is useful for debugging tests inside a container by following the :ref:`Debugging AnsibleModule-based modules` instructions. -Interactive Shell +Interactive shell ================= Use the ``ansible-test shell`` command to get an interactive shell in the same environment used to run tests. Examples: * ``ansible-test shell --docker`` - Open a shell in the default docker container. -* ``ansible-test shell --venv --python 3.6`` - Open a shell in a Python 3.6 virtual environment. - +* ``ansible-test shell --venv --python 3.10`` - Open a shell in a Python 3.10 virtual environment. 
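+
+A shell is not always required. For example, the inventory used by the test environment can be exported for
+use with other tools, using the ``--export`` option noted in this change. The destination path here is an
+arbitrary example:
+
+.. code-block:: shell
+
+    ansible-test shell --docker --export /tmp/test-inventory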
-Code Coverage +Code coverage ============= Code coverage reports make it easy to identify untested code for which more tests should @@ -72,22 +356,17 @@ aren't using the ``--venv`` or ``--docker`` options which create an isolated pyt environment then you may have to use the ``--requirements`` option to ensure that the correct version of the coverage module is installed: -.. code-block:: shell-session +.. code-block:: shell ansible-test coverage erase ansible-test units --coverage apt ansible-test integration --coverage aws_lambda ansible-test coverage html - Reports can be generated in several different formats: * ``ansible-test coverage report`` - Console report. * ``ansible-test coverage html`` - HTML report. * ``ansible-test coverage xml`` - XML report. -To clear data between test runs, use the ``ansible-test coverage erase`` command. For a full list of features see the online help: - -.. code-block:: shell-session - - ansible-test coverage --help +To clear data between test runs, use the ``ansible-test coverage erase`` command. diff --git a/test/integration/targets/ansiballz_python/aliases b/test/integration/targets/ansiballz_python/aliases index e2c8fd39561029..7ae73ab926d41b 100644 --- a/test/integration/targets/ansiballz_python/aliases +++ b/test/integration/targets/ansiballz_python/aliases @@ -1,3 +1,2 @@ shippable/posix/group1 -skip/aix context/target diff --git a/test/integration/targets/ansible-test-config-invalid/aliases b/test/integration/targets/ansible-test-config-invalid/aliases new file mode 100644 index 00000000000000..193276cc9e5df3 --- /dev/null +++ b/test/integration/targets/ansible-test-config-invalid/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/config.yml b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/config.yml new file mode 100644 index 00000000000000..9977a2836c1a0f --- /dev/null +++ b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/config.yml @@ -0,0 +1 @@ +invalid diff --git a/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/aliases b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/aliases new file mode 100644 index 00000000000000..1af1cf90b6a198 --- /dev/null +++ b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/aliases @@ -0,0 +1 @@ +context/controller diff --git a/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/runme.sh b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/runme.sh new file mode 100755 index 00000000000000..f1f641af19bf62 --- /dev/null +++ b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/integration/targets/test/runme.sh @@ -0,0 +1 @@ +#!/usr/bin/env bash diff --git a/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py new file mode 100644 index 00000000000000..06e7782e57d6b0 --- 
/dev/null +++ b/test/integration/targets/ansible-test-config-invalid/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py @@ -0,0 +1,2 @@ +def test_me(): + pass diff --git a/test/integration/targets/ansible-test-config-invalid/runme.sh b/test/integration/targets/ansible-test-config-invalid/runme.sh new file mode 100755 index 00000000000000..6ff2d4067bc2f4 --- /dev/null +++ b/test/integration/targets/ansible-test-config-invalid/runme.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Make sure that ansible-test continues to work when content config is invalid. + +set -eu + +source ../collection/setup.sh + +set -x + +ansible-test sanity --test import --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --venv -v +ansible-test units --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --venv -v +ansible-test integration --color --venv -v diff --git a/test/integration/targets/ansible-test-config/aliases b/test/integration/targets/ansible-test-config/aliases new file mode 100644 index 00000000000000..193276cc9e5df3 --- /dev/null +++ b/test/integration/targets/ansible-test-config/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-config/ansible_collections/ns/col/plugins/module_utils/test.py b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/plugins/module_utils/test.py new file mode 100644 index 00000000000000..962dba2b49332a --- /dev/null +++ b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/plugins/module_utils/test.py @@ -0,0 +1,14 @@ +import sys +import os + + +def version_to_str(value): + return '.'.join(str(v) for v in value) + + +controller_min_python_version = tuple(int(v) for v in os.environ['ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION'].split('.')) +current_python_version = sys.version_info[:2] + +if current_python_version < controller_min_python_version: + raise Exception('Current Python version %s is lower than the minimum controller Python version of %s. ' + 'Did the collection config get ignored?' 
% (version_to_str(current_python_version), version_to_str(controller_min_python_version))) diff --git a/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/config.yml b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/config.yml new file mode 100644 index 00000000000000..7772d7d2023f48 --- /dev/null +++ b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/config.yml @@ -0,0 +1,2 @@ +modules: + python_requires: controller # allow tests to pass when run against a Python version not supported by the controller diff --git a/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py new file mode 100644 index 00000000000000..b320a15aa70e7f --- /dev/null +++ b/test/integration/targets/ansible-test-config/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_test.py @@ -0,0 +1,5 @@ +from ansible_collections.ns.col.plugins.module_utils import test + + +def test_me(): + assert test diff --git a/test/integration/targets/ansible-test-config/runme.sh b/test/integration/targets/ansible-test-config/runme.sh new file mode 100755 index 00000000000000..9636d04daad0ea --- /dev/null +++ b/test/integration/targets/ansible-test-config/runme.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Make sure that ansible-test is able to parse collection config when using a venv. + +set -eu + +source ../collection/setup.sh + +set -x + +# On systems with a Python version below the minimum controller Python version, such as the default container, this test +# will verify that the content config is working properly after delegation. Otherwise it will only verify that no errors +# occur while trying to access content config (such as missing requirements). + +ansible-test sanity --test import --color --venv -v +ansible-test units --color --venv -v diff --git a/test/integration/targets/ansible-test-container/aliases b/test/integration/targets/ansible-test-container/aliases new file mode 100644 index 00000000000000..65a0509311d96c --- /dev/null +++ b/test/integration/targets/ansible-test-container/aliases @@ -0,0 +1,5 @@ +shippable/posix/group6 +context/controller +needs/root +destructive +retry/never # tests on some platforms run too long to make retries useful diff --git a/test/integration/targets/ansible-test-container/runme.py b/test/integration/targets/ansible-test-container/runme.py new file mode 100755 index 00000000000000..6ca068d0e309e6 --- /dev/null +++ b/test/integration/targets/ansible-test-container/runme.py @@ -0,0 +1,1102 @@ +#!/usr/bin/env python +"""Test suite used to verify ansible-test is able to run its containers on various container hosts.""" + +from __future__ import annotations + +import abc +import dataclasses +import datetime +import errno +import functools +import json +import os +import pathlib +import pwd +import re +import secrets +import shlex +import shutil +import signal +import subprocess +import sys +import tempfile +import time +import typing as t + +UNPRIVILEGED_USER_NAME = 'ansible-test' +CGROUP_SYSTEMD = pathlib.Path('/sys/fs/cgroup/systemd') +LOG_PATH = pathlib.Path('/tmp/results') + +# The value of /proc/*/loginuid when it is not set. +# It is a reserved UID, which is the maximum 32-bit unsigned integer value. 
+# See: https://access.redhat.com/solutions/25404 +LOGINUID_NOT_SET = 4294967295 + +UID = os.getuid() + +try: + LOGINUID = int(pathlib.Path('/proc/self/loginuid').read_text()) + LOGINUID_MISMATCH = LOGINUID != LOGINUID_NOT_SET and LOGINUID != UID +except FileNotFoundError: + LOGINUID = None + LOGINUID_MISMATCH = False + + +def main() -> None: + """Main program entry point.""" + display.section('Startup check') + + try: + bootstrap_type = pathlib.Path('/etc/ansible-test.bootstrap').read_text().strip() + except FileNotFoundError: + bootstrap_type = 'undefined' + + display.info(f'Bootstrap type: {bootstrap_type}') + + if bootstrap_type != 'remote': + display.warning('Skipping destructive test on system which is not an ansible-test remote provisioned instance.') + return + + display.info(f'UID: {UID} / {LOGINUID}') + + if UID != 0: + raise Exception('This test must be run as root.') + + if not LOGINUID_MISMATCH: + if LOGINUID is None: + display.warning('Tests involving loginuid mismatch will be skipped on this host since it does not have audit support.') + elif LOGINUID == LOGINUID_NOT_SET: + display.warning('Tests involving loginuid mismatch will be skipped on this host since it is not set.') + elif LOGINUID == 0: + raise Exception('Use sudo, su, etc. as a non-root user to become root before running this test.') + else: + raise Exception() + + display.section(f'Bootstrapping {os_release}') + + bootstrapper = Bootstrapper.init() + bootstrapper.run() + + result_dir = LOG_PATH + + if result_dir.exists(): + shutil.rmtree(result_dir) + + result_dir.mkdir() + result_dir.chmod(0o777) + + scenarios = get_test_scenarios() + results = [run_test(scenario) for scenario in scenarios] + error_total = 0 + + for name in sorted(result_dir.glob('*.log')): + lines = name.read_text().strip().splitlines() + error_count = len([line for line in lines if line.startswith('FAIL: ')]) + error_total += error_count + + display.section(f'Log ({error_count=}/{len(lines)}): {name.name}') + + for line in lines: + if line.startswith('FAIL: '): + display.show(line, display.RED) + else: + display.show(line) + + error_count = len([result for result in results if result.message]) + error_total += error_count + + duration = datetime.timedelta(seconds=int(sum(result.duration.total_seconds() for result in results))) + + display.section(f'Test Results ({error_count=}/{len(results)}) [{duration}]') + + for result in results: + notes = f' <cleanup: {", ".join(result.cleanup)}>' if result.cleanup else '' + + if result.cgroup_dirs: + notes += f' <cgroup_dirs: {len(result.cgroup_dirs)}>' + + notes += f' [{result.duration}]' + + if result.message: + display.show(f'FAIL: {result.scenario} {result.message}{notes}', display.RED) + elif result.duration.total_seconds() >= 90: + display.show(f'SLOW: {result.scenario}{notes}', display.YELLOW) + else: + display.show(f'PASS: {result.scenario}{notes}') + + if error_total: + sys.exit(1) + + +def get_test_scenarios() -> list[TestScenario]: + """Generate and return a list of test scenarios.""" + + supported_engines = ('docker', 'podman') + available_engines = [engine for engine in supported_engines if shutil.which(engine)] + + if not available_engines: + raise ApplicationError(f'No supported container engines found: {", ".join(supported_engines)}') + + completion_lines = pathlib.Path(os.environ['PYTHONPATH'], '../test/lib/ansible_test/_data/completion/docker.txt').read_text().splitlines() + + # TODO: consider including testing for the collection default image + entries = {name: value for name, value in (parse_completion_entry(line) for line in completion_lines) if name != 
'default'} + + unprivileged_user = User.get(UNPRIVILEGED_USER_NAME) + + scenarios: list[TestScenario] = [] + + for container_name, settings in entries.items(): + image = settings['image'] + cgroup = settings.get('cgroup', 'v1-v2') + + if container_name == 'centos6' and os_release.id == 'alpine': + # Alpine kernels do not emulate vsyscall by default, which causes the centos6 container to fail during init. + # See: https://unix.stackexchange.com/questions/478387/running-a-centos-docker-image-on-arch-linux-exits-with-code-139 + # Other distributions enable settings which trap vsyscall by default. + # See: https://www.kernelconfig.io/config_legacy_vsyscall_xonly + # See: https://www.kernelconfig.io/config_legacy_vsyscall_emulate + continue + + for engine in available_engines: + # TODO: figure out how to get tests passing using docker without disabling selinux + disable_selinux = os_release.id == 'fedora' and engine == 'docker' and cgroup != 'none' + expose_cgroup_v1 = cgroup == 'v1-only' and get_docker_info(engine).cgroup_version != 1 + debug_systemd = cgroup != 'none' + + # The sleep+pkill used to support the cgroup probe causes problems with the centos6 container. + # It results in sshd connections being refused or reset for many, but not all, container instances. + # The underlying cause of this issue is unknown. + probe_cgroups = container_name != 'centos6' + + # The default RHEL 9 crypto policy prevents use of SHA-1. + # This results in SSH errors with centos6 containers: ssh_dispatch_run_fatal: Connection to 1.2.3.4 port 22: error in libcrypto + # See: https://access.redhat.com/solutions/6816771 + enable_sha1 = os_release.id == 'rhel' and os_release.version_id.startswith('9.') and container_name == 'centos6' + + if cgroup != 'none' and get_docker_info(engine).cgroup_version == 1 and not have_cgroup_systemd(): + expose_cgroup_v1 = True # the host uses cgroup v1 but there is no systemd cgroup and the container requires cgroup support + + user_scenarios = [ + # TODO: test rootless docker + UserScenario(ssh=unprivileged_user), + ] + + if engine == 'podman': + user_scenarios.append(UserScenario(ssh=ROOT_USER)) + + # TODO: test podman remote on Alpine and Ubuntu hosts + # TODO: combine remote with ssh using different unprivileged users + if os_release.id not in ('alpine', 'ubuntu'): + user_scenarios.append(UserScenario(remote=unprivileged_user)) + + if LOGINUID_MISMATCH: + user_scenarios.append(UserScenario()) + + for user_scenario in user_scenarios: + scenarios.append( + TestScenario( + user_scenario=user_scenario, + engine=engine, + container_name=container_name, + image=image, + disable_selinux=disable_selinux, + expose_cgroup_v1=expose_cgroup_v1, + enable_sha1=enable_sha1, + debug_systemd=debug_systemd, + probe_cgroups=probe_cgroups, + ) + ) + + return scenarios + + +def run_test(scenario: TestScenario) -> TestResult: + """Run a test scenario and return the test results.""" + display.section(f'Testing {scenario} Started') + + start = time.monotonic() + + integration = ['ansible-test', 'integration', 'split'] + integration_options = ['--target', f'docker:{scenario.container_name}', '--color', '--truncate', '0', '-v'] + target_only_options = [] + + if scenario.debug_systemd: + integration_options.append('--dev-systemd-debug') + + if scenario.probe_cgroups: + target_only_options = ['--dev-probe-cgroups', str(LOG_PATH)] + + commands = [ + # The cgroup probe is only performed for the first test of the target. + # There's no need to repeat the probe again for the same target. 
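+        # Probe results are written to the directory given to --dev-probe-cgroups (LOG_PATH here) and summarized in main().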
+ # The controller will be tested separately as a target. + # This ensures that both the probe and no-probe code paths are functional. + [*integration, *integration_options, *target_only_options], + # For the split test we'll use alpine3 as the controller. There are two reasons for this: + # 1) It doesn't require the cgroup v1 hack, so we can test a target that doesn't need that. + # 2) It doesn't require disabling selinux, so we can test a target that doesn't need that. + [*integration, '--controller', 'docker:alpine3', *integration_options], + ] + + common_env: dict[str, str] = {} + test_env: dict[str, str] = {} + + if scenario.engine == 'podman': + if scenario.user_scenario.remote: + common_env.update( + # Podman 4.3.0 has a regression which requires a port for remote connections to work. + # See: https://github.com/containers/podman/issues/16509 + CONTAINER_HOST=f'ssh://{scenario.user_scenario.remote.name}@localhost:22' + f'/run/user/{scenario.user_scenario.remote.pwnam.pw_uid}/podman/podman.sock', + CONTAINER_SSHKEY=str(pathlib.Path('~/.ssh/id_rsa').expanduser()), # TODO: add support for ssh + remote when the ssh user is not root + ) + + test_env.update(ANSIBLE_TEST_PREFER_PODMAN='1') + + test_env.update(common_env) + + if scenario.user_scenario.ssh: + client_become_cmd = ['ssh', f'{scenario.user_scenario.ssh.name}@localhost'] + test_commands = [client_become_cmd + [f'cd ~/ansible; {format_env(test_env)}{sys.executable} bin/{shlex.join(command)}'] for command in commands] + else: + client_become_cmd = ['sh', '-c'] + test_commands = [client_become_cmd + [f'{format_env(test_env)}{shlex.join(command)}'] for command in commands] + + prime_storage_command = [] + + if scenario.engine == 'podman' and scenario.user_scenario.actual.name == UNPRIVILEGED_USER_NAME: + # When testing podman we need to make sure that the overlay filesystem is used instead of vfs. + # Using the vfs filesystem will result in running out of disk space during the tests. + # To change the filesystem used, the existing storage directory must be removed before "priming" the storage database. 
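+        # Priming pulls a small image with STORAGE_DRIVER=overlay set, so the freshly created storage database records the overlay driver.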
+ # + # Without this change the following message may be displayed: + # + # User-selected graph driver "overlay" overwritten by graph driver "vfs" from database - delete libpod local files to resolve + # + # However, with this change it may be replaced with the following message: + # + # User-selected graph driver "vfs" overwritten by graph driver "overlay" from database - delete libpod local files to resolve + + actual_become_cmd = ['ssh', f'{scenario.user_scenario.actual.name}@localhost'] + prime_storage_command = actual_become_cmd + prepare_prime_podman_storage() + + message = '' + + if scenario.expose_cgroup_v1: + prepare_cgroup_systemd(scenario.user_scenario.actual.name, scenario.engine) + + try: + if prime_storage_command: + retry_command(lambda: run_command(*prime_storage_command), retry_any_error=True) + + if scenario.disable_selinux: + run_command('setenforce', 'permissive') + + if scenario.enable_sha1: + run_command('update-crypto-policies', '--set', 'DEFAULT:SHA1') + + for test_command in test_commands: + retry_command(lambda: run_command(*test_command)) + except SubprocessError as ex: + message = str(ex) + display.error(f'{scenario} {message}') + finally: + if scenario.enable_sha1: + run_command('update-crypto-policies', '--set', 'DEFAULT') + + if scenario.disable_selinux: + run_command('setenforce', 'enforcing') + + if scenario.expose_cgroup_v1: + dirs = remove_cgroup_systemd() + else: + dirs = list_group_systemd() + + cleanup_command = [scenario.engine, 'rmi', '-f', scenario.image] + + try: + retry_command(lambda: run_command(*client_become_cmd + [f'{format_env(common_env)}{shlex.join(cleanup_command)}']), retry_any_error=True) + except SubprocessError as ex: + display.error(str(ex)) + + cleanup = cleanup_podman() if scenario.engine == 'podman' else tuple() + + finish = time.monotonic() + duration = datetime.timedelta(seconds=int(finish - start)) + + display.section(f'Testing {scenario} Completed in {duration}') + + return TestResult( + scenario=scenario, + message=message, + cleanup=cleanup, + duration=duration, + cgroup_dirs=tuple(str(path) for path in dirs), + ) + + +def prepare_prime_podman_storage() -> list[str]: + """Partially prime podman storage and return a command to complete the remainder.""" + prime_storage_command = ['rm -rf ~/.local/share/containers; STORAGE_DRIVER=overlay podman pull quay.io/bedrock/alpine:3.16.2'] + + test_containers = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local/share/containers').expanduser() + + if test_containers.is_dir(): + # First remove the directory as root, since the user may not have permissions on all the files. + # The directory will be removed again after login, before initializing the database. 
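+        # Note: rmtree() here is the retrying wrapper defined later in this file, not shutil.rmtree.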
+ rmtree(test_containers) + + return prime_storage_command + + +def cleanup_podman() -> tuple[str, ...]: + """Cleanup podman processes and files on disk.""" + cleanup = [] + + for remaining in range(3, -1, -1): + processes = [(int(item[0]), item[1]) for item in + [item.split(maxsplit=1) for item in run_command('ps', '-A', '-o', 'pid,comm', capture=True).stdout.splitlines()] + if pathlib.Path(item[1].split()[0]).name in ('catatonit', 'podman', 'conmon')] + + if not processes: + break + + for pid, name in processes: + display.info(f'Killing "{name}" ({pid}) ...') + + try: + os.kill(pid, signal.SIGTERM if remaining > 1 else signal.SIGKILL) + except ProcessLookupError: + pass + + cleanup.append(name) + + time.sleep(1) + else: + raise Exception('failed to kill all matching processes') + + uid = pwd.getpwnam(UNPRIVILEGED_USER_NAME).pw_uid + + container_tmp = pathlib.Path(f'/tmp/containers-user-{uid}') + podman_tmp = pathlib.Path(f'/tmp/podman-run-{uid}') + + user_config = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.config').expanduser() + user_local = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.local').expanduser() + + if container_tmp.is_dir(): + rmtree(container_tmp) + + if podman_tmp.is_dir(): + rmtree(podman_tmp) + + if user_config.is_dir(): + rmtree(user_config) + + if user_local.is_dir(): + rmtree(user_local) + + return tuple(sorted(set(cleanup))) + + +def have_cgroup_systemd() -> bool: + """Return True if the container host has a systemd cgroup.""" + return pathlib.Path(CGROUP_SYSTEMD).is_dir() + + +def prepare_cgroup_systemd(username: str, engine: str) -> None: + """Prepare the systemd cgroup.""" + CGROUP_SYSTEMD.mkdir() + + run_command('mount', 'cgroup', '-t', 'cgroup', str(CGROUP_SYSTEMD), '-o', 'none,name=systemd,xattr', capture=True) + + if engine == 'podman': + run_command('chown', '-R', f'{username}:{username}', str(CGROUP_SYSTEMD)) + + run_command('find', str(CGROUP_SYSTEMD), '-type', 'd', '-exec', 'ls', '-l', '{}', ';') + + +def list_group_systemd() -> list[pathlib.Path]: + """List the systemd cgroup.""" + dirs = set() + + for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False): + for dirname in dirnames: + target_path = pathlib.Path(dirpath, dirname) + display.info(f'dir: {target_path}') + dirs.add(target_path) + + return sorted(dirs) + + +def remove_cgroup_systemd() -> list[pathlib.Path]: + """Remove the systemd cgroup.""" + dirs = set() + + for sleep_seconds in range(1, 10): + try: + for dirpath, dirnames, filenames in os.walk(CGROUP_SYSTEMD, topdown=False): + for dirname in dirnames: + target_path = pathlib.Path(dirpath, dirname) + display.info(f'rmdir: {target_path}') + dirs.add(target_path) + target_path.rmdir() + except OSError as ex: + if ex.errno != errno.EBUSY: + raise + + error = str(ex) + else: + break + + display.warning(f'{error} -- sleeping for {sleep_seconds} second(s) before trying again ...') # pylint: disable=used-before-assignment + + time.sleep(sleep_seconds) + + time.sleep(1) # allow time for cgroups to be fully removed before unmounting + + run_command('umount', str(CGROUP_SYSTEMD)) + + CGROUP_SYSTEMD.rmdir() + + time.sleep(1) # allow time for cgroup hierarchy to be removed after unmounting + + cgroup = pathlib.Path('/proc/self/cgroup').read_text() + + if 'systemd' in cgroup: + raise Exception('systemd hierarchy detected') + + return sorted(dirs) + + +def rmtree(path: pathlib.Path) -> None: + """Wrapper around shutil.rmtree with additional error handling.""" + for retries in range(10, -1, -1): + try: + display.info(f'rmtree: {path} ({retries} 
attempts remaining) ... ')
+            shutil.rmtree(path)
+        except Exception:
+            if not path.exists():
+                display.info(f'rmtree: {path} (not found)')
+                return
+
+            if not path.is_dir():
+                display.info(f'rmtree: {path} (not a directory)')
+                return
+
+            if retries:
+                continue
+
+            raise
+        else:
+            display.info(f'rmtree: {path} (done)')
+            return
+
+
+def format_env(env: dict[str, str]) -> str:
+    """Format an env dict for injection into a shell command and return the resulting string."""
+    if env:
+        return ' '.join(f'{shlex.quote(key)}={shlex.quote(value)}' for key, value in env.items()) + ' '
+
+    return ''
+
+
+class DockerInfo:
+    """The results of `docker info` for the container runtime."""
+
+    def __init__(self, data: dict[str, t.Any]) -> None:
+        self.data = data
+
+    @property
+    def cgroup_version(self) -> int:
+        """The cgroup version of the container host."""
+        data = self.data
+        host = data.get('host')
+
+        if host:
+            version = int(host['cgroupVersion'].lstrip('v'))  # podman
+        else:
+            version = int(data['CgroupVersion'])  # docker
+
+        return version
+
+
+@functools.lru_cache
+def get_docker_info(engine: str) -> DockerInfo:
+    """Return info for the current container runtime. The results are cached."""
+    return DockerInfo(json.loads(run_command(engine, 'info', '--format', '{{ json . }}', capture=True).stdout))
+
+
+@dataclasses.dataclass(frozen=True)
+class User:
+    name: str
+    pwnam: pwd.struct_passwd
+
+    @classmethod
+    def get(cls, name: str) -> User:
+        return User(
+            name=name,
+            pwnam=pwd.getpwnam(name),
+        )
+
+
+@dataclasses.dataclass(frozen=True)
+class UserScenario:
+    ssh: User | None = None
+    remote: User | None = None
+
+    @property
+    def actual(self) -> User:
+        return self.remote or self.ssh or ROOT_USER
+
+
+@dataclasses.dataclass(frozen=True)
+class TestScenario:
+    user_scenario: UserScenario
+    engine: str
+    container_name: str
+    image: str
+    disable_selinux: bool
+    expose_cgroup_v1: bool
+    enable_sha1: bool
+    debug_systemd: bool
+    probe_cgroups: bool
+
+    @property
+    def tags(self) -> tuple[str, ...]:
+        tags = []
+
+        if self.user_scenario.ssh:
+            tags.append(f'ssh: {self.user_scenario.ssh.name}')
+
+        if self.user_scenario.remote:
+            tags.append(f'remote: {self.user_scenario.remote.name}')
+
+        if self.disable_selinux:
+            tags.append('selinux: permissive')
+
+        if self.expose_cgroup_v1:
+            tags.append('cgroup: v1')
+
+        if self.enable_sha1:
+            tags.append('sha1: enabled')
+
+        return tuple(tags)
+
+    @property
+    def tag_label(self) -> str:
+        return ' '.join(f'[{tag}]' for tag in self.tags)
+
+    def __str__(self) -> str:
+        return f'[{self.container_name}] ({self.engine}) {self.tag_label}'.strip()
+
+
+@dataclasses.dataclass(frozen=True)
+class TestResult:
+    scenario: TestScenario
+    message: str
+    cleanup: tuple[str, ...]
+    duration: datetime.timedelta
+    cgroup_dirs: tuple[str, ...]
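+
+# Example usage of the helpers above (illustrative only; the values shown are
+# hypothetical and not part of the test suite):
+#
+#   format_env({'ANSIBLE_TEST_PREFER_PODMAN': '1'})  # -> "ANSIBLE_TEST_PREFER_PODMAN=1 "
+#   get_docker_info('podman').cgroup_version         # -> 2 on a cgroup v2 unified host
+#
+# The trailing space returned by format_env() lets the result be prepended
+# directly to a shlex-joined command, and functools.lru_cache ensures repeated
+# get_docker_info() calls for the same engine reuse the parsed `docker info` JSON.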
+
+
+def parse_completion_entry(value: str) -> tuple[str, dict[str, str]]:
+    """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+    values = value.split()
+
+    name = values[0]
+    data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
+
+    return name, data
+
+
+@dataclasses.dataclass(frozen=True)
+class SubprocessResult:
+    """Result from execution of a subprocess."""
+
+    command: list[str]
+    stdout: str
+    stderr: str
+    status: int
+
+
+class ApplicationError(Exception):
+    """An application error."""
+
+    def __init__(self, message: str) -> None:
+        self.message = message
+
+        super().__init__(message)
+
+
+class SubprocessError(ApplicationError):
+    """An error from executing a subprocess."""
+
+    def __init__(self, result: SubprocessResult) -> None:
+        self.result = result
+
+        message = f'Command `{shlex.join(result.command)}` exited with status: {result.status}'
+
+        stdout = (result.stdout or '').strip()
+        stderr = (result.stderr or '').strip()
+
+        if stdout:
+            message += f'\n>>> Standard Output\n{stdout}'
+
+        if stderr:
+            message += f'\n>>> Standard Error\n{stderr}'
+
+        super().__init__(message)
+
+
+class ProgramNotFoundError(ApplicationError):
+    """A required program was not found."""
+
+    def __init__(self, name: str) -> None:
+        self.name = name
+
+        super().__init__(f'Missing program: {name}')
+
+
+class Display:
+    """Display interface for sending output to the console."""
+
+    CLEAR = '\033[0m'
+    RED = '\033[31m'
+    GREEN = '\033[32m'
+    YELLOW = '\033[33m'
+    BLUE = '\033[34m'
+    PURPLE = '\033[35m'
+    CYAN = '\033[36m'
+
+    def __init__(self) -> None:
+        self.sensitive: set[str] = set()
+
+    def section(self, message: str) -> None:
+        """Print a section message to the console."""
+        self.show(f'==> {message}', color=self.BLUE)
+
+    def subsection(self, message: str) -> None:
+        """Print a subsection message to the console."""
+        self.show(f'--> {message}', color=self.CYAN)
+
+    def fatal(self, message: str) -> None:
+        """Print a fatal message to the console."""
+        self.show(f'FATAL: {message}', color=self.RED)
+
+    def error(self, message: str) -> None:
+        """Print an error message to the console."""
+        self.show(f'ERROR: {message}', color=self.RED)
+
+    def warning(self, message: str) -> None:
+        """Print a warning message to the console."""
+        self.show(f'WARNING: {message}', color=self.PURPLE)
+
+    def info(self, message: str) -> None:
+        """Print an info message to the console."""
+        self.show(f'INFO: {message}', color=self.YELLOW)
+
+    def show(self, message: str, color: str | None = None) -> None:
+        """Print a message to the console."""
+        for item in self.sensitive:
+            message = message.replace(item, '*' * len(item))
+
+        print(f'{color or self.CLEAR}{message}{self.CLEAR}', flush=True)
+
+
+def run_module(
+    module: str,
+    args: dict[str, t.Any],
+) -> SubprocessResult:
+    """Run the specified Ansible module and return the result."""
+    playbook = f'''
+- hosts: localhost
+  gather_facts: no
+  tasks:
+    - {module}: {json.dumps(args)}
+'''
+
+    with tempfile.NamedTemporaryFile() as playbook_file:
+        playbook_file.write(playbook.encode('utf8'))
+        playbook_file.flush()
+
+        return run_command('ansible-playbook', '-v', playbook_file.name)
+
+
+def retry_command(func: t.Callable[[], SubprocessResult], attempts: int = 3, retry_any_error: bool = False) -> SubprocessResult:
+    """Run the given command function up to the specified number of attempts when the failure is due to an SSH error, or on any error when retry_any_error is True."""
+    for attempts_remaining in
range(attempts - 1, -1, -1): + try: + return func() + except SubprocessError as ex: + if ex.result.command[0] == 'ssh' and ex.result.status == 255 and attempts_remaining: + # SSH connections on our Ubuntu 22.04 host sometimes fail for unknown reasons. + # This retry should allow the test suite to continue, maintaining CI stability. + # TODO: Figure out why local SSH connections sometimes fail during the test run. + display.warning('Command failed due to an SSH error. Waiting a few seconds before retrying.') + time.sleep(3) + continue + + if retry_any_error: + display.warning('Command failed. Waiting a few seconds before retrying.') + time.sleep(3) + continue + + raise + + +def run_command( + *command: str, + data: str | None = None, + stdin: int | t.IO[bytes] | None = None, + env: dict[str, str] | None = None, + capture: bool = False, +) -> SubprocessResult: + """Run the specified command and return the result.""" + stdin = subprocess.PIPE if data else stdin or subprocess.DEVNULL + stdout = subprocess.PIPE if capture else None + stderr = subprocess.PIPE if capture else None + + display.subsection(f'Run command: {shlex.join(command)}') + + try: + with subprocess.Popen(args=command, stdin=stdin, stdout=stdout, stderr=stderr, env=env, text=True) as process: + process_stdout, process_stderr = process.communicate(data) + process_status = process.returncode + except FileNotFoundError: + raise ProgramNotFoundError(command[0]) from None + + result = SubprocessResult( + command=list(command), + stdout=process_stdout, + stderr=process_stderr, + status=process_status, + ) + + if process.returncode != 0: + raise SubprocessError(result) + + return result + + +class Bootstrapper(metaclass=abc.ABCMeta): + """Bootstrapper for remote instances.""" + + @classmethod + def install_podman(cls) -> bool: + """Return True if podman will be installed.""" + return False + + @classmethod + def install_docker(cls) -> bool: + """Return True if docker will be installed.""" + return False + + @classmethod + def usable(cls) -> bool: + """Return True if the bootstrapper can be used, otherwise False.""" + return False + + @classmethod + def init(cls) -> t.Type[Bootstrapper]: + """Return a bootstrapper type appropriate for the current system.""" + for bootstrapper in cls.__subclasses__(): + if bootstrapper.usable(): + return bootstrapper + + display.warning('No supported bootstrapper found.') + return Bootstrapper + + @classmethod + def run(cls) -> None: + """Run the bootstrapper.""" + cls.configure_root_user() + cls.configure_unprivileged_user() + cls.configure_source_trees() + cls.configure_ssh_keys() + cls.configure_podman_remote() + + @classmethod + def configure_root_user(cls) -> None: + """Configure the root user to run tests.""" + root_password_status = run_command('passwd', '--status', 'root', capture=True) + root_password_set = root_password_status.stdout.split()[1] + + if root_password_set not in ('P', 'PS'): + root_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip() + + run_module( + 'user', + dict( + user='root', + password=root_password, + ), + ) + + @classmethod + def configure_unprivileged_user(cls) -> None: + """Configure the unprivileged user to run tests.""" + unprivileged_password = run_command('openssl', 'passwd', '-5', '-stdin', data=secrets.token_hex(8), capture=True).stdout.strip() + + run_module( + 'user', + dict( + user=UNPRIVILEGED_USER_NAME, + password=unprivileged_password, + groups=['docker'] if cls.install_docker() else [], + 
append=True, + ), + ) + + if os_release.id == 'alpine': + # Most distros handle this automatically, but not Alpine. + # See: https://www.redhat.com/sysadmin/rootless-podman + start = 165535 + end = start + 65535 + id_range = f'{start}-{end}' + + run_command( + 'usermod', + '--add-subuids', + id_range, + '--add-subgids', + id_range, + UNPRIVILEGED_USER_NAME, + ) + + @classmethod + def configure_source_trees(cls): + """Configure the source trees needed to run tests for both root and the unprivileged user.""" + current_ansible = pathlib.Path(os.environ['PYTHONPATH']).parent + + root_ansible = pathlib.Path('~').expanduser() / 'ansible' + test_ansible = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}').expanduser() / 'ansible' + + if current_ansible != root_ansible: + display.info(f'copying {current_ansible} -> {root_ansible} ...') + rmtree(root_ansible) + shutil.copytree(current_ansible, root_ansible) + run_command('chown', '-R', 'root:root', str(root_ansible)) + + display.info(f'copying {current_ansible} -> {test_ansible} ...') + rmtree(test_ansible) + shutil.copytree(current_ansible, test_ansible) + run_command('chown', '-R', f'{UNPRIVILEGED_USER_NAME}:{UNPRIVILEGED_USER_NAME}', str(test_ansible)) + + paths = [pathlib.Path(test_ansible)] + + for root, dir_names, file_names in os.walk(test_ansible): + paths.extend(pathlib.Path(root, dir_name) for dir_name in dir_names) + paths.extend(pathlib.Path(root, file_name) for file_name in file_names) + + user = pwd.getpwnam(UNPRIVILEGED_USER_NAME) + uid = user.pw_uid + gid = user.pw_gid + + for path in paths: + os.chown(path, uid, gid) + + @classmethod + def configure_ssh_keys(cls) -> None: + """Configure SSH keys needed to run tests.""" + user = pwd.getpwnam(UNPRIVILEGED_USER_NAME) + uid = user.pw_uid + gid = user.pw_gid + + current_rsa_pub = pathlib.Path('~/.ssh/id_rsa.pub').expanduser() + + test_authorized_keys = pathlib.Path(f'~{UNPRIVILEGED_USER_NAME}/.ssh/authorized_keys').expanduser() + + test_authorized_keys.parent.mkdir(mode=0o755, parents=True, exist_ok=True) + os.chown(test_authorized_keys.parent, uid, gid) + + shutil.copyfile(current_rsa_pub, test_authorized_keys) + os.chown(test_authorized_keys, uid, gid) + test_authorized_keys.chmod(mode=0o644) + + @classmethod + def configure_podman_remote(cls) -> None: + """Configure podman remote support.""" + # TODO: figure out how to support remote podman without systemd (Alpine) + # TODO: figure out how to support remote podman on Ubuntu + if os_release.id in ('alpine', 'ubuntu'): + return + + # Support podman remote on any host with systemd available. + retry_command(lambda: run_command('ssh', f'{UNPRIVILEGED_USER_NAME}@localhost', 'systemctl', '--user', 'enable', '--now', 'podman.socket')) + run_command('loginctl', 'enable-linger', UNPRIVILEGED_USER_NAME) + + +class DnfBootstrapper(Bootstrapper): + """Bootstrapper for dnf based systems.""" + + @classmethod + def install_podman(cls) -> bool: + """Return True if podman will be installed.""" + return True + + @classmethod + def install_docker(cls) -> bool: + """Return True if docker will be installed.""" + return os_release.id != 'rhel' + + @classmethod + def usable(cls) -> bool: + """Return True if the bootstrapper can be used, otherwise False.""" + return bool(shutil.which('dnf')) + + @classmethod + def run(cls) -> None: + """Run the bootstrapper.""" + # NOTE: Install crun to make it available to podman, otherwise installing moby-engine can cause podman to use runc instead. 
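+        # (An illustrative check, assuming standard podman CLI behavior and not
+        # prescribed by this change: the OCI runtime podman actually selects can
+        # be verified after bootstrapping with
+        # `podman info --format '{{ .Host.OCIRuntime.Name }}'`, which is expected
+        # to report "crun" rather than "runc".)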
+ packages = ['podman', 'crun'] + + if cls.install_docker(): + packages.append('moby-engine') + + if os_release.id == 'fedora' and os_release.version_id == '36': + # In Fedora 36 the current version of netavark, 1.2.0, causes TCP connect to hang between rootfull containers. + # The previously tested version, 1.1.0, did not have this issue. + # Unfortunately, with the release of 1.2.0 the 1.1.0 package was removed from the repositories. + # Thankfully the 1.0.2 version is available and also works, so we'll use that here until a fixed version is available. + # See: https://github.com/containers/netavark/issues/491 + packages.append('netavark-1.0.2') + + if os_release.id == 'rhel': + # As of the release of RHEL 9.1, installing podman on RHEL 9.0 results in a non-fatal error at install time: + # + # libsemanage.semanage_pipe_data: Child process /usr/libexec/selinux/hll/pp failed with code: 255. (No such file or directory). + # container: libsepol.policydb_read: policydb module version 21 does not match my version range 4-20 + # container: libsepol.sepol_module_package_read: invalid module in module package (at section 0) + # container: Failed to read policy package + # libsemanage.semanage_direct_commit: Failed to compile hll files into cil files. + # (No such file or directory). + # /usr/sbin/semodule: Failed! + # + # Unfortunately this is then fatal when running podman, resulting in no error message and a 127 return code. + # The solution is to update the policycoreutils package *before* installing podman. + # + # NOTE: This work-around can probably be removed once we're testing on RHEL 9.1, as the updated packages should already be installed. + # Unfortunately at this time there is no RHEL 9.1 AMI available (other than the Beta release). + + run_command('dnf', 'update', '-y', 'policycoreutils') + + run_command('dnf', 'install', '-y', *packages) + + if cls.install_docker(): + run_command('systemctl', 'start', 'docker') + + if os_release.id == 'rhel' and os_release.version_id.startswith('8.'): + # RHEL 8 defaults to using runc instead of crun. + # Unfortunately runc seems to have issues with podman remote. + # Specifically, it tends to cause conmon to burn CPU until it reaches the specified exit delay. + # So we'll just change the system default to crun instead. + # Unfortunately we can't do this with the `--runtime` option since that doesn't work with podman remote. + + conf = pathlib.Path('/usr/share/containers/containers.conf').read_text() + + conf = re.sub('^runtime .*', 'runtime = "crun"', conf, flags=re.MULTILINE) + + pathlib.Path('/etc/containers/containers.conf').write_text(conf) + + super().run() + + +class AptBootstrapper(Bootstrapper): + """Bootstrapper for apt based systems.""" + + @classmethod + def install_podman(cls) -> bool: + """Return True if podman will be installed.""" + return not (os_release.id == 'ubuntu' and os_release.version_id == '20.04') + + @classmethod + def install_docker(cls) -> bool: + """Return True if docker will be installed.""" + return True + + @classmethod + def usable(cls) -> bool: + """Return True if the bootstrapper can be used, otherwise False.""" + return bool(shutil.which('apt-get')) + + @classmethod + def run(cls) -> None: + """Run the bootstrapper.""" + apt_env = os.environ.copy() + apt_env.update( + DEBIAN_FRONTEND='noninteractive', + ) + + packages = ['docker.io'] + + if cls.install_podman(): + # NOTE: Install crun to make it available to podman, otherwise installing docker.io can cause podman to use runc instead. 
+ # Using podman rootless requires the `newuidmap` and `slirp4netns` commands. + packages.extend(('podman', 'crun', 'uidmap', 'slirp4netns')) + + run_command('apt-get', 'install', *packages, '-y', '--no-install-recommends', env=apt_env) + + super().run() + + +class ApkBootstrapper(Bootstrapper): + """Bootstrapper for apk based systems.""" + + @classmethod + def install_podman(cls) -> bool: + """Return True if podman will be installed.""" + return True + + @classmethod + def install_docker(cls) -> bool: + """Return True if docker will be installed.""" + return True + + @classmethod + def usable(cls) -> bool: + """Return True if the bootstrapper can be used, otherwise False.""" + return bool(shutil.which('apk')) + + @classmethod + def run(cls) -> None: + """Run the bootstrapper.""" + # The `openssl` package is used to generate hashed passwords. + packages = ['docker', 'podman', 'openssl'] + + run_command('apk', 'add', *packages) + run_command('service', 'docker', 'start') + run_command('modprobe', 'tun') + + super().run() + + +@dataclasses.dataclass(frozen=True) +class OsRelease: + """Operating system identification.""" + + id: str + version_id: str + + @staticmethod + def init() -> OsRelease: + """Detect the current OS release and return the result.""" + lines = run_command('sh', '-c', '. /etc/os-release && echo $ID && echo $VERSION_ID', capture=True).stdout.splitlines() + + result = OsRelease( + id=lines[0], + version_id=lines[1], + ) + + display.show(f'Detected OS "{result.id}" version "{result.version_id}".') + + return result + + +display = Display() +os_release = OsRelease.init() + +ROOT_USER = User.get('root') + +if __name__ == '__main__': + main() diff --git a/test/integration/targets/ansible-test-container/runme.sh b/test/integration/targets/ansible-test-container/runme.sh new file mode 100755 index 00000000000000..56fd669031ecf8 --- /dev/null +++ b/test/integration/targets/ansible-test-container/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eu + +./runme.py diff --git a/test/integration/targets/ansible-test-docker/aliases b/test/integration/targets/ansible-test-docker/aliases index a862ab8b362c78..c389df53d14428 100644 --- a/test/integration/targets/ansible-test-docker/aliases +++ b/test/integration/targets/ansible-test-docker/aliases @@ -1,2 +1,3 @@ shippable/generic/group1 # Runs in the default test container so access to tools like pwsh context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/aliases b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/aliases new file mode 100644 index 00000000000000..1af1cf90b6a198 --- /dev/null +++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/aliases @@ -0,0 +1 @@ +context/controller diff --git a/test/integration/targets/ansible-test-docker/collection-tests/docker.sh b/test/integration/targets/ansible-test-docker/collection-tests/docker.sh deleted file mode 100755 index 69372245049ca5..00000000000000 --- a/test/integration/targets/ansible-test-docker/collection-tests/docker.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -set -eux -o pipefail - -cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" -cd "${WORK_DIR}/ansible_collections/ns/col" - -# common args for all tests -# because we are running in shippable/generic/ we are already in the default docker container -common=(--python 
"${ANSIBLE_TEST_PYTHON_VERSION}" --venv --venv-system-site-packages --color --truncate 0 "${@}") - -# prime the venv to work around issue with PyYAML detection in ansible-test -ansible-test sanity "${common[@]}" --test ignores - -# tests -ansible-test sanity "${common[@]}" -ansible-test units "${common[@]}" -ansible-test integration "${common[@]}" diff --git a/test/integration/targets/ansible-test-docker/runme.sh b/test/integration/targets/ansible-test-docker/runme.sh index 7c956b4f1588bc..014d36324021bc 100755 --- a/test/integration/targets/ansible-test-docker/runme.sh +++ b/test/integration/targets/ansible-test-docker/runme.sh @@ -1,24 +1,14 @@ #!/usr/bin/env bash -set -eu -o pipefail +source ../collection/setup.sh -# tests must be executed outside of the ansible source tree -# otherwise ansible-test will test the ansible source instead of the test collection -# the temporary directory provided by ansible-test resides within the ansible source tree -tmp_dir=$(mktemp -d) +set -x -trap 'rm -rf "${tmp_dir}"' EXIT +# common args for all tests +# because we are running in shippable/generic/ we are already in the default docker container +common=(--python "${ANSIBLE_TEST_PYTHON_VERSION}" --venv --venv-system-site-packages --color --truncate 0 "${@}") -export TEST_DIR -export WORK_DIR - -TEST_DIR="$PWD" - -for test in collection-tests/*.sh; do - WORK_DIR="${tmp_dir}/$(basename "${test}" ".sh")" - mkdir "${WORK_DIR}" - echo "**********************************************************************" - echo "TEST: ${test}: STARTING" - "${test}" "${@}" || (echo "TEST: ${test}: FAILED" && exit 1) - echo "TEST: ${test}: PASSED" -done +# tests +ansible-test sanity "${common[@]}" +ansible-test units "${common[@]}" +ansible-test integration "${common[@]}" diff --git a/test/integration/targets/ansible-test-integration-targets/aliases b/test/integration/targets/ansible-test-integration-targets/aliases new file mode 100644 index 00000000000000..7741d44451547b --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/aliases @@ -0,0 +1,4 @@ +shippable/posix/group3 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_a/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_a/aliases new file mode 100644 index 00000000000000..c9dc649b4c91be --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_a/aliases @@ -0,0 +1,2 @@ +context/controller +destructive diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_b/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_b/aliases new file mode 100644 index 00000000000000..c9dc649b4c91be --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/destructive_b/aliases @@ -0,0 +1,2 @@ +context/controller +destructive diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_a/aliases 
b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_a/aliases new file mode 100644 index 00000000000000..bd3e3ef16850ee --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_a/aliases @@ -0,0 +1,2 @@ +context/controller +disabled diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_b/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_b/aliases new file mode 100644 index 00000000000000..bd3e3ef16850ee --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/disabled_b/aliases @@ -0,0 +1,2 @@ +context/controller +disabled diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_a/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_a/aliases new file mode 100644 index 00000000000000..3497fae52ab5a3 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_a/aliases @@ -0,0 +1,2 @@ +context/controller +unstable diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_b/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_b/aliases new file mode 100644 index 00000000000000..3497fae52ab5a3 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unstable_b/aliases @@ -0,0 +1,2 @@ +context/controller +unstable diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_a/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_a/aliases new file mode 100644 index 00000000000000..a8996396c9b8d2 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_a/aliases @@ -0,0 +1,2 @@ +context/controller +unsupported diff --git a/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_b/aliases b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_b/aliases new file mode 100644 index 00000000000000..a8996396c9b8d2 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/ansible_collections/ns/col/tests/integration/targets/unsupported_b/aliases @@ -0,0 +1,2 @@ +context/controller +unsupported diff --git a/test/integration/targets/ansible-test-integration-targets/runme.sh b/test/integration/targets/ansible-test-integration-targets/runme.sh new file mode 100755 index 00000000000000..bd4470246e7221 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/runme.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +test="$(pwd)/test.py" + +source ../collection/setup.sh + +set -x + +"${test}" -v diff --git a/test/integration/targets/ansible-test-integration-targets/test.py 
b/test/integration/targets/ansible-test-integration-targets/test.py new file mode 100755 index 00000000000000..443ed59d627000 --- /dev/null +++ b/test/integration/targets/ansible-test-integration-targets/test.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python + +import subprocess +import unittest + + +class OptionsTest(unittest.TestCase): + options = ( + 'unsupported', + 'disabled', + 'unstable', + 'destructive', + ) + + def test_options(self): + for option in self.options: + with self.subTest(option=option): + try: + command = ['ansible-test', 'integration', '--list-targets'] + + skip_all = subprocess.run([*command, f'{option}_a', f'{option}_b'], text=True, capture_output=True, check=True) + allow_all = subprocess.run([*command, f'--allow-{option}', f'{option}_a', f'{option}_b'], text=True, capture_output=True, check=True) + allow_first = subprocess.run([*command, f'{option}/{option}_a', f'{option}_b'], text=True, capture_output=True, check=True) + allow_last = subprocess.run([*command, f'{option}_a', f'{option}/{option}_b'], text=True, capture_output=True, check=True) + + self.assertEqual(skip_all.stdout.splitlines(), []) + self.assertEqual(allow_all.stdout.splitlines(), [f'{option}_a', f'{option}_b']) + self.assertEqual(allow_first.stdout.splitlines(), [f'{option}_a']) + self.assertEqual(allow_last.stdout.splitlines(), [f'{option}_b']) + except subprocess.CalledProcessError as ex: + raise Exception(f'{ex}:\n>>> Standard Output:\n{ex.stdout}\n>>> Standard Error:\n{ex.stderr}') from ex + + +if __name__ == '__main__': + unittest.main() diff --git a/test/integration/targets/ansible-test-no-tty/aliases b/test/integration/targets/ansible-test-no-tty/aliases new file mode 100644 index 00000000000000..620c2144dbfdb9 --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/aliases @@ -0,0 +1,4 @@ +context/controller +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +needs/target/collection diff --git a/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/run-with-pty.py b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/run-with-pty.py new file mode 100755 index 00000000000000..463915284b2d2c --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/run-with-pty.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +"""Run a command using a PTY.""" + +import sys + +if sys.version_info < (3, 10): + import vendored_pty as pty +else: + import pty + +sys.exit(1 if pty.spawn(sys.argv[1:]) else 0) diff --git a/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/aliases b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/aliases new file mode 100644 index 00000000000000..1af1cf90b6a198 --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/aliases @@ -0,0 +1 @@ +context/controller diff --git a/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/assert-no-tty.py b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/assert-no-tty.py new file mode 100755 index 00000000000000..a2b094e2fcad2d --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/assert-no-tty.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +"""Assert no TTY is 
available.""" + +import sys + +status = 0 + +for handle in sys.stdin, sys.stdout, sys.stderr: + if handle.isatty(): + print(f'{handle} is a TTY', file=sys.stderr) + status += 1 + +sys.exit(status) diff --git a/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/runme.sh b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/runme.sh new file mode 100755 index 00000000000000..ae712ddfbf84bc --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/tests/integration/targets/no-tty/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +./assert-no-tty.py diff --git a/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/vendored_pty.py b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/vendored_pty.py new file mode 100644 index 00000000000000..bc70803b8d7061 --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/vendored_pty.py @@ -0,0 +1,189 @@ +# Vendored copy of https://github.com/python/cpython/blob/3680ebed7f3e529d01996dd0318601f9f0d02b4b/Lib/pty.py +# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) +"""Pseudo terminal utilities.""" + +# Bugs: No signal handling. Doesn't set slave termios and window size. +# Only tested on Linux, FreeBSD, and macOS. +# See: W. Richard Stevens. 1992. Advanced Programming in the +# UNIX Environment. Chapter 19. +# Author: Steen Lumholt -- with additions by Guido. + +from select import select +import os +import sys +import tty + +# names imported directly for test mocking purposes +from os import close, waitpid +from tty import setraw, tcgetattr, tcsetattr + +__all__ = ["openpty", "fork", "spawn"] + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +CHILD = 0 + +def openpty(): + """openpty() -> (master_fd, slave_fd) + Open a pty master/slave pair, using os.openpty() if possible.""" + + try: + return os.openpty() + except (AttributeError, OSError): + pass + master_fd, slave_name = _open_terminal() + slave_fd = slave_open(slave_name) + return master_fd, slave_fd + +def master_open(): + """master_open() -> (master_fd, slave_name) + Open a pty master and return the fd, and the filename of the slave end. + Deprecated, use openpty() instead.""" + + try: + master_fd, slave_fd = os.openpty() + except (AttributeError, OSError): + pass + else: + slave_name = os.ttyname(slave_fd) + os.close(slave_fd) + return master_fd, slave_name + + return _open_terminal() + +def _open_terminal(): + """Open pty master and return (master_fd, tty_name).""" + for x in 'pqrstuvwxyzPQRST': + for y in '0123456789abcdef': + pty_name = '/dev/pty' + x + y + try: + fd = os.open(pty_name, os.O_RDWR) + except OSError: + continue + return (fd, '/dev/tty' + x + y) + raise OSError('out of pty devices') + +def slave_open(tty_name): + """slave_open(tty_name) -> slave_fd + Open the pty slave and acquire the controlling terminal, returning + opened filedescriptor. 
+ Deprecated, use openpty() instead.""" + + result = os.open(tty_name, os.O_RDWR) + try: + from fcntl import ioctl, I_PUSH + except ImportError: + return result + try: + ioctl(result, I_PUSH, "ptem") + ioctl(result, I_PUSH, "ldterm") + except OSError: + pass + return result + +def fork(): + """fork() -> (pid, master_fd) + Fork and make the child a session leader with a controlling terminal.""" + + try: + pid, fd = os.forkpty() + except (AttributeError, OSError): + pass + else: + if pid == CHILD: + try: + os.setsid() + except OSError: + # os.forkpty() already set us session leader + pass + return pid, fd + + master_fd, slave_fd = openpty() + pid = os.fork() + if pid == CHILD: + # Establish a new session. + os.setsid() + os.close(master_fd) + + # Slave becomes stdin/stdout/stderr of child. + os.dup2(slave_fd, STDIN_FILENO) + os.dup2(slave_fd, STDOUT_FILENO) + os.dup2(slave_fd, STDERR_FILENO) + if slave_fd > STDERR_FILENO: + os.close(slave_fd) + + # Explicitly open the tty to make it become a controlling tty. + tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR) + os.close(tmp_fd) + else: + os.close(slave_fd) + + # Parent and child process. + return pid, master_fd + +def _writen(fd, data): + """Write all the data to a descriptor.""" + while data: + n = os.write(fd, data) + data = data[n:] + +def _read(fd): + """Default read function.""" + return os.read(fd, 1024) + +def _copy(master_fd, master_read=_read, stdin_read=_read): + """Parent copy loop. + Copies + pty master -> standard output (master_read) + standard input -> pty master (stdin_read)""" + fds = [master_fd, STDIN_FILENO] + while fds: + rfds, _wfds, _xfds = select(fds, [], []) + + if master_fd in rfds: + # Some OSes signal EOF by returning an empty byte string, + # some throw OSErrors. + try: + data = master_read(master_fd) + except OSError: + data = b"" + if not data: # Reached EOF. + return # Assume the child process has exited and is + # unreachable, so we clean up. + else: + os.write(STDOUT_FILENO, data) + + if STDIN_FILENO in rfds: + data = stdin_read(STDIN_FILENO) + if not data: + fds.remove(STDIN_FILENO) + else: + _writen(master_fd, data) + +def spawn(argv, master_read=_read, stdin_read=_read): + """Create a spawned process.""" + if isinstance(argv, str): + argv = (argv,) + sys.audit('pty.spawn', argv) + + pid, master_fd = fork() + if pid == CHILD: + os.execlp(argv[0], *argv) + + try: + mode = tcgetattr(STDIN_FILENO) + setraw(STDIN_FILENO) + restore = True + except tty.error: # This is the same as termios.error + restore = False + + try: + _copy(master_fd, master_read, stdin_read) + finally: + if restore: + tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode) + + close(master_fd) + return waitpid(pid, 0)[1] diff --git a/test/integration/targets/ansible-test-no-tty/runme.sh b/test/integration/targets/ansible-test-no-tty/runme.sh new file mode 100755 index 00000000000000..c02793a1236a99 --- /dev/null +++ b/test/integration/targets/ansible-test-no-tty/runme.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Verify that ansible-test runs integration tests without a TTY. + +source ../collection/setup.sh + +set -x + +if ./run-with-pty.py tests/integration/targets/no-tty/assert-no-tty.py > /dev/null; then + echo "PTY assertion did not fail. Either PTY creation failed or PTY detection is broken." 
+ exit 1 +fi + +./run-with-pty.py ansible-test integration --color "${@}" diff --git a/test/integration/targets/ansible-test-sanity-lint/aliases b/test/integration/targets/ansible-test-sanity-lint/aliases new file mode 100644 index 00000000000000..193276cc9e5df3 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-lint/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-sanity-lint/expected.txt b/test/integration/targets/ansible-test-sanity-lint/expected.txt new file mode 100644 index 00000000000000..94238c8a879ab0 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-lint/expected.txt @@ -0,0 +1 @@ +plugins/modules/python-wrong-shebang.py:1:1: expected module shebang "b'#!/usr/bin/python'" but found: b'#!invalid' diff --git a/test/integration/targets/ansible-test-sanity-lint/runme.sh b/test/integration/targets/ansible-test-sanity-lint/runme.sh new file mode 100755 index 00000000000000..3e73cb4a615b99 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-lint/runme.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash +# Make sure that `ansible-test sanity --lint` outputs the correct format to stdout, even when delegation is used. + +set -eu + +# Create test scenarios at runtime that do not pass sanity tests. +# This avoids the need to create ignore entries for the tests. + +mkdir -p ansible_collections/ns/col/plugins/modules + +( + cd ansible_collections/ns/col/plugins/modules + + echo '#!invalid' > python-wrong-shebang.py # expected module shebang "b'#!/usr/bin/python'" but found: b'#!invalid' +) + +source ../collection/setup.sh + +set -x + +### +### Run the sanity test with the `--lint` option. +### + +# Use the `--venv` option to verify that delegation preserves the output streams. +ansible-test sanity --test shebang --color --failure-ok --lint --venv "${@}" 1> actual-stdout.txt 2> actual-stderr.txt +diff -u "${TEST_DIR}/expected.txt" actual-stdout.txt +grep -f "${TEST_DIR}/expected.txt" actual-stderr.txt + +# Run without delegation to verify direct output uses the correct streams. +ansible-test sanity --test shebang --color --failure-ok --lint "${@}" 1> actual-stdout.txt 2> actual-stderr.txt +diff -u "${TEST_DIR}/expected.txt" actual-stdout.txt +grep -f "${TEST_DIR}/expected.txt" actual-stderr.txt + +### +### Run the sanity test without the `--lint` option. +### + +# Use the `--venv` option to verify that delegation preserves the output streams. +ansible-test sanity --test shebang --color --failure-ok --venv "${@}" 1> actual-stdout.txt 2> actual-stderr.txt +grep -f "${TEST_DIR}/expected.txt" actual-stdout.txt +[ ! -s actual-stderr.txt ] + +# Run without delegation to verify direct output uses the correct streams. +ansible-test sanity --test shebang --color --failure-ok "${@}" 1> actual-stdout.txt 2> actual-stderr.txt +grep -f "${TEST_DIR}/expected.txt" actual-stdout.txt +[ ! 
-s actual-stderr.txt ] diff --git a/test/integration/targets/ansible-test-sanity-shebang/aliases b/test/integration/targets/ansible-test-sanity-shebang/aliases new file mode 100644 index 00000000000000..193276cc9e5df3 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/powershell.ps1 b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/powershell.ps1 new file mode 100644 index 00000000000000..9eb7192c0e77d2 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/powershell.ps1 @@ -0,0 +1 @@ +#!powershell diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/python-no-shebang.py b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/python-no-shebang.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/python.py b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/python.py new file mode 100644 index 00000000000000..013e4b7ec4eaa4 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/plugins/modules/python.py @@ -0,0 +1 @@ +#!/usr/bin/python diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_bash.sh b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_bash.sh new file mode 100755 index 00000000000000..f1f641af19bf62 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_bash.sh @@ -0,0 +1 @@ +#!/usr/bin/env bash diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_python.py b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_python.py new file mode 100755 index 00000000000000..4265cc3e6c16c0 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/env_python.py @@ -0,0 +1 @@ +#!/usr/bin/env python diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/sh.sh b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/sh.sh new file mode 100755 index 00000000000000..1a2485251c33a7 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/scripts/sh.sh @@ -0,0 +1 @@ +#!/bin/sh diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_bash.sh b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_bash.sh new file mode 100755 index 00000000000000..f1f641af19bf62 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_bash.sh @@ -0,0 +1 @@ +#!/usr/bin/env bash diff --git 
a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_python.py b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_python.py new file mode 100755 index 00000000000000..4265cc3e6c16c0 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/env_python.py @@ -0,0 +1 @@ +#!/usr/bin/env python diff --git a/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/sh.sh b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/sh.sh new file mode 100755 index 00000000000000..1a2485251c33a7 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/ansible_collections/ns/col/tests/integration/targets/valid/sh.sh @@ -0,0 +1 @@ +#!/bin/sh diff --git a/test/integration/targets/ansible-test-sanity-shebang/expected.txt b/test/integration/targets/ansible-test-sanity-shebang/expected.txt new file mode 100644 index 00000000000000..fbd733060ded28 --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/expected.txt @@ -0,0 +1,9 @@ +plugins/modules/no-shebang-executable.py:0:0: file without shebang should not be executable +plugins/modules/python-executable.py:0:0: module should not be executable +plugins/modules/python-wrong-shebang.py:1:1: expected module shebang "b'#!/usr/bin/python'" but found: b'#!invalid' +plugins/modules/utf-16-be-bom.py:0:0: file starts with a UTF-16 (BE) byte order mark +plugins/modules/utf-16-le-bom.py:0:0: file starts with a UTF-16 (LE) byte order mark +plugins/modules/utf-32-be-bom.py:0:0: file starts with a UTF-32 (BE) byte order mark +plugins/modules/utf-32-le-bom.py:0:0: file starts with a UTF-32 (LE) byte order mark +plugins/modules/utf-8-bom.py:0:0: file starts with a UTF-8 byte order mark +scripts/unexpected-shebang:1:1: unexpected non-module shebang: b'#!/usr/bin/custom' diff --git a/test/integration/targets/ansible-test-sanity-shebang/runme.sh b/test/integration/targets/ansible-test-sanity-shebang/runme.sh new file mode 100755 index 00000000000000..0fd3bce833f2ab --- /dev/null +++ b/test/integration/targets/ansible-test-sanity-shebang/runme.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -eu + +# Create test scenarios at runtime that do not pass sanity tests. +# This avoids the need to create ignore entries for the tests. 
+ +( + cd ansible_collections/ns/col/plugins/modules + + touch no-shebang-executable.py && chmod +x no-shebang-executable.py # file without shebang should not be executable + python -c "open('utf-32-be-bom.py', 'wb').write(b'\x00\x00\xFE\xFF')" # file starts with a UTF-32 (BE) byte order mark + python -c "open('utf-32-le-bom.py', 'wb').write(b'\xFF\xFE\x00\x00')" # file starts with a UTF-32 (LE) byte order mark + python -c "open('utf-16-be-bom.py', 'wb').write(b'\xFE\xFF')" # file starts with a UTF-16 (BE) byte order mark + python -c "open('utf-16-le-bom.py', 'wb').write(b'\xFF\xFE')" # file starts with a UTF-16 (LE) byte order mark + python -c "open('utf-8-bom.py', 'wb').write(b'\xEF\xBB\xBF')" # file starts with a UTF-8 byte order mark + echo '#!/usr/bin/python' > python-executable.py && chmod +x python-executable.py # module should not be executable + echo '#!invalid' > python-wrong-shebang.py # expected module shebang "b'#!/usr/bin/python'" but found: b'#!invalid' +) + +( + cd ansible_collections/ns/col/scripts + + echo '#!/usr/bin/custom' > unexpected-shebang # unexpected non-module shebang: b'#!/usr/bin/custom' + + echo '#!/usr/bin/make -f' > Makefile && chmod +x Makefile # pass + echo '#!/bin/bash -eu' > bash_eu.sh && chmod +x bash_eu.sh # pass + echo '#!/bin/bash -eux' > bash_eux.sh && chmod +x bash_eux.sh # pass + echo '#!/usr/bin/env fish' > env_fish.fish && chmod +x env_fish.fish # pass + echo '#!/usr/bin/env pwsh' > env_pwsh.ps1 && chmod +x env_pwsh.ps1 # pass +) + +mkdir ansible_collections/ns/col/examples + +source ../collection/setup.sh + +set -x + +ansible-test sanity --test shebang --color --lint --failure-ok "${@}" > actual.txt + +diff -u "${TEST_DIR}/expected.txt" actual.txt diff --git a/test/integration/targets/ansible-test-shell/aliases b/test/integration/targets/ansible-test-shell/aliases new file mode 100644 index 00000000000000..193276cc9e5df3 --- /dev/null +++ b/test/integration/targets/ansible-test-shell/aliases @@ -0,0 +1,4 @@ +shippable/posix/group1 # runs in the distro test containers +shippable/generic/group1 # runs in the default test container +context/controller +needs/target/collection diff --git a/test/integration/targets/ansible-test-shell/ansible_collections/ns/col/.keep b/test/integration/targets/ansible-test-shell/ansible_collections/ns/col/.keep new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/integration/targets/ansible-test-shell/expected-stderr.txt b/test/integration/targets/ansible-test-shell/expected-stderr.txt new file mode 100644 index 00000000000000..af6415db3c7240 --- /dev/null +++ b/test/integration/targets/ansible-test-shell/expected-stderr.txt @@ -0,0 +1 @@ +stderr diff --git a/test/integration/targets/ansible-test-shell/expected-stdout.txt b/test/integration/targets/ansible-test-shell/expected-stdout.txt new file mode 100644 index 00000000000000..faa3a15c184b50 --- /dev/null +++ b/test/integration/targets/ansible-test-shell/expected-stdout.txt @@ -0,0 +1 @@ +stdout diff --git a/test/integration/targets/ansible-test-shell/runme.sh b/test/integration/targets/ansible-test-shell/runme.sh new file mode 100755 index 00000000000000..0e0d18ae30ec03 --- /dev/null +++ b/test/integration/targets/ansible-test-shell/runme.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Make sure that `ansible-test shell` outputs to the correct stream. + +set -eu + +source ../collection/setup.sh + +set -x + +# Try `shell` with delegation. 
+ +ansible-test shell --venv -- \ + python -c 'import sys; print("stdout"); print("stderr", file=sys.stderr)' 1> actual-stdout.txt 2> actual-stderr.txt + +cat actual-stdout.txt +cat actual-stderr.txt + +diff -u "${TEST_DIR}/expected-stdout.txt" actual-stdout.txt +grep -f "${TEST_DIR}/expected-stderr.txt" actual-stderr.txt + +# Try `shell` without delegation. + +ansible-test shell -- \ + python -c 'import sys; print("stdout"); print("stderr", file=sys.stderr)' 1> actual-stdout.txt 2> actual-stderr.txt + +cat actual-stdout.txt +cat actual-stderr.txt + +diff -u "${TEST_DIR}/expected-stdout.txt" actual-stdout.txt +grep -f "${TEST_DIR}/expected-stderr.txt" actual-stderr.txt diff --git a/test/integration/targets/ansible-test/aliases b/test/integration/targets/ansible-test/aliases index b98e7bb217b468..002fe2cfb2f2fa 100644 --- a/test/integration/targets/ansible-test/aliases +++ b/test/integration/targets/ansible-test/aliases @@ -1,4 +1,5 @@ shippable/posix/group1 # runs in the distro test containers shippable/generic/group1 # runs in the default test container context/controller +needs/target/collection destructive # adds and then removes packages into lib/ansible/_vendor/ diff --git a/test/integration/targets/ansible-test/collection-tests/coverage.sh b/test/integration/targets/ansible-test/collection-tests/coverage.sh index c2336a32287678..ddc0f9b4ef40b3 100755 --- a/test/integration/targets/ansible-test/collection-tests/coverage.sh +++ b/test/integration/targets/ansible-test/collection-tests/coverage.sh @@ -5,7 +5,7 @@ set -eux -o pipefail cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" cd "${WORK_DIR}/ansible_collections/ns/col" -"${TEST_DIR}/collection-tests/update-ignore.py" +"${TEST_DIR}/../collection/update-ignore.py" # common args for all tests common=(--venv --color --truncate 0 "${@}") diff --git a/test/integration/targets/ansible-test/collection-tests/sanity-vendor.sh b/test/integration/targets/ansible-test/collection-tests/sanity-vendor.sh index 0fcd659ba2904d..72043bfdc28a81 100755 --- a/test/integration/targets/ansible-test/collection-tests/sanity-vendor.sh +++ b/test/integration/targets/ansible-test/collection-tests/sanity-vendor.sh @@ -5,7 +5,7 @@ set -eux -o pipefail cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" cd "${WORK_DIR}/ansible_collections/ns/col" -"${TEST_DIR}/collection-tests/update-ignore.py" +"${TEST_DIR}/../collection/update-ignore.py" vendor_dir="$(python -c 'import pathlib, ansible._vendor; print(pathlib.Path(ansible._vendor.__file__).parent)')" diff --git a/test/integration/targets/ansible-test/collection-tests/sanity.sh b/test/integration/targets/ansible-test/collection-tests/sanity.sh index 21e8607b83bdb1..99d9b427fb1698 100755 --- a/test/integration/targets/ansible-test/collection-tests/sanity.sh +++ b/test/integration/targets/ansible-test/collection-tests/sanity.sh @@ -5,6 +5,6 @@ set -eux -o pipefail cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" cd "${WORK_DIR}/ansible_collections/ns/col" -"${TEST_DIR}/collection-tests/update-ignore.py" +"${TEST_DIR}/../collection/update-ignore.py" ansible-test sanity --color --truncate 0 "${@}" diff --git a/test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh b/test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh index 713bd5d637b41b..b1b9508a75f42b 100755 --- a/test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh +++ b/test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh @@ -4,7 +4,14 @@ set 
-eux -o pipefail cd "${WORK_DIR}" -if ansible-test --help 1>stdout 2>stderr; then +# some options should succeed even in an unsupported directory +ansible-test --help +ansible-test --version + +# the --help option should show the current working directory when it is unsupported +ansible-test --help 2>&1 | grep '^Current working directory: ' + +if ansible-test sanity 1>stdout 2>stderr; then echo "ansible-test did not fail" exit 1 fi diff --git a/test/integration/targets/apt/aliases b/test/integration/targets/apt/aliases index 941bce38b8c968..ce9d97d45ffbad 100644 --- a/test/integration/targets/apt/aliases +++ b/test/integration/targets/apt/aliases @@ -4,4 +4,3 @@ skip/freebsd skip/osx skip/macos skip/rhel -skip/aix diff --git a/test/integration/targets/apt_key/aliases b/test/integration/targets/apt_key/aliases index f46fd7011a968d..a820ec90821adb 100644 --- a/test/integration/targets/apt_key/aliases +++ b/test/integration/targets/apt_key/aliases @@ -3,4 +3,3 @@ skip/freebsd skip/osx skip/macos skip/rhel -skip/aix diff --git a/test/integration/targets/apt_repository/aliases b/test/integration/targets/apt_repository/aliases index 7e4621902ab896..34e2b54058f8b0 100644 --- a/test/integration/targets/apt_repository/aliases +++ b/test/integration/targets/apt_repository/aliases @@ -4,4 +4,3 @@ skip/freebsd skip/osx skip/macos skip/rhel -skip/aix diff --git a/test/integration/targets/async/aliases b/test/integration/targets/async/aliases index 4d56e5c7f88f94..c989cd70c37690 100644 --- a/test/integration/targets/async/aliases +++ b/test/integration/targets/async/aliases @@ -1,4 +1,3 @@ async_status async_wrapper shippable/posix/group2 -skip/aix diff --git a/test/integration/targets/become/aliases b/test/integration/targets/become/aliases index ad691e7d036d36..db54e68c9285a9 100644 --- a/test/integration/targets/become/aliases +++ b/test/integration/targets/become/aliases @@ -1,4 +1,3 @@ destructive shippable/posix/group1 -skip/aix context/target diff --git a/test/integration/targets/binary_modules/Makefile b/test/integration/targets/binary_modules/Makefile index c3092e47404d9f..9c0b7baf85c33c 100644 --- a/test/integration/targets/binary_modules/Makefile +++ b/test/integration/targets/binary_modules/Makefile @@ -7,7 +7,6 @@ all: cd library; \ GOOS=linux GOARCH=amd64 go build -o helloworld_linux_x86_64 helloworld.go; \ GOOS=linux GOARCH=ppc64le go build -o helloworld_linux_ppc64le helloworld.go; \ - GOOS=aix GOARCH=ppc64 go build -o helloworld_aix_chrp helloworld.go; \ GOOS=windows GOARCH=amd64 go build -o helloworld_win32nt_64-bit.exe helloworld.go; \ GOOS=darwin GOARCH=amd64 go build -o helloworld_darwin_x86_64 helloworld.go; \ GOOS=freebsd GOARCH=amd64 go build -o helloworld_freebsd_amd64 helloworld.go diff --git a/test/integration/targets/collection/aliases b/test/integration/targets/collection/aliases new file mode 100644 index 00000000000000..136c05e0d0290e --- /dev/null +++ b/test/integration/targets/collection/aliases @@ -0,0 +1 @@ +hidden diff --git a/test/integration/targets/collection/setup.sh b/test/integration/targets/collection/setup.sh new file mode 100755 index 00000000000000..f1b33a55b00d82 --- /dev/null +++ b/test/integration/targets/collection/setup.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Source this file from collection integration tests. +# +# It simplifies several aspects of collection testing: +# +# 1) Collection tests must be executed outside of the ansible source tree. +# Otherwise ansible-test will test the ansible source instead of the test collection. 
+# The temporary directory provided by ansible-test resides within the ansible source tree. +# +# 2) Sanity test ignore files for collections must be versioned based on the ansible-core version being used. +# This script generates an ignore file with the correct filename for the current ansible-core version. +# +# 3) Sanity tests which are multi-version require an ignore entry per Python version. +# This script replicates these ignore entries for each supported Python version based on the ignored path. + +set -eu -o pipefail + +export TEST_DIR +export WORK_DIR + +TEST_DIR="$PWD" +WORK_DIR="$(mktemp -d)" + +trap 'rm -rf "${WORK_DIR}"' EXIT + +cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}" +cd "${WORK_DIR}/ansible_collections/ns/col" + +"${TEST_DIR}/../collection/update-ignore.py" diff --git a/test/integration/targets/ansible-test/collection-tests/update-ignore.py b/test/integration/targets/collection/update-ignore.py similarity index 91% rename from test/integration/targets/ansible-test/collection-tests/update-ignore.py rename to test/integration/targets/collection/update-ignore.py index 51ddf9ac3a844d..92a702cf13e146 100755 --- a/test/integration/targets/ansible-test/collection-tests/update-ignore.py +++ b/test/integration/targets/collection/update-ignore.py @@ -16,6 +16,11 @@ def main(): from ansible_test._internal import constants src_path = 'tests/sanity/ignore.txt' + + if not os.path.exists(src_path): + print(f'Skipping updates on non-existent ignore file: {src_path}') + return + directory = os.path.dirname(src_path) name, ext = os.path.splitext(os.path.basename(src_path)) major_minor = '.'.join(release.__version__.split('.')[:2]) diff --git a/test/integration/targets/command_shell/aliases b/test/integration/targets/command_shell/aliases index 8dd7b884545080..a1bd9947decf6d 100644 --- a/test/integration/targets/command_shell/aliases +++ b/test/integration/targets/command_shell/aliases @@ -1,4 +1,3 @@ command shippable/posix/group2 shell -skip/aix diff --git a/test/integration/targets/copy/aliases b/test/integration/targets/copy/aliases index db9bbd8c424942..961b20518e2967 100644 --- a/test/integration/targets/copy/aliases +++ b/test/integration/targets/copy/aliases @@ -1,4 +1,3 @@ needs/root shippable/posix/group2 destructive -skip/aix diff --git a/test/integration/targets/cron/aliases b/test/integration/targets/cron/aliases index b2033afd63b995..e1a9ab481820be 100644 --- a/test/integration/targets/cron/aliases +++ b/test/integration/targets/cron/aliases @@ -1,5 +1,4 @@ destructive shippable/posix/group4 -skip/aix skip/osx skip/macos diff --git a/test/integration/targets/debconf/aliases b/test/integration/targets/debconf/aliases index f8e28c7e4697b2..a6dafcf8cd8b03 100644 --- a/test/integration/targets/debconf/aliases +++ b/test/integration/targets/debconf/aliases @@ -1,2 +1 @@ shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/dnf/aliases b/test/integration/targets/dnf/aliases index 4d1afd64358bd6..e555959e464166 100644 --- a/test/integration/targets/dnf/aliases +++ b/test/integration/targets/dnf/aliases @@ -1,6 +1,5 @@ destructive shippable/posix/group4 -skip/aix skip/power/centos skip/freebsd skip/osx diff --git a/test/integration/targets/dnf/tasks/main.yml b/test/integration/targets/dnf/tasks/main.yml index d66a06530b8a15..45187f9a61401d 100644 --- a/test/integration/targets/dnf/tasks/main.yml +++ b/test/integration/targets/dnf/tasks/main.yml @@ -66,10 +66,12 @@ # # This fails due to conflicts on Fedora 34, but we can nuke this entirely once # #74224 
lands, because it covers nobest cases. +# Skipped in RHEL9 by changing the version test to == instead of >= +# due to missing RHEL9 docker-ce packages currently - include_tasks: nobest.yml when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=') and ansible_distribution_major_version is version('34', '!=')) or - (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>=')) + (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '==')) - include_tasks: cacheonly.yml diff --git a/test/integration/targets/dnf/vars/RedHat-9.yml b/test/integration/targets/dnf/vars/RedHat-9.yml new file mode 100644 index 00000000000000..e700a9b9afcb7d --- /dev/null +++ b/test/integration/targets/dnf/vars/RedHat-9.yml @@ -0,0 +1,2 @@ +astream_name: '@container-tools:latest/common' +astream_name_no_stream: '@container-tools/common' diff --git a/test/integration/targets/dpkg_selections/aliases b/test/integration/targets/dpkg_selections/aliases index 55da8c88b82c21..c0d5684bcba002 100644 --- a/test/integration/targets/dpkg_selections/aliases +++ b/test/integration/targets/dpkg_selections/aliases @@ -1,6 +1,5 @@ shippable/posix/group1 destructive -skip/aix skip/freebsd skip/osx skip/macos diff --git a/test/integration/targets/file/aliases b/test/integration/targets/file/aliases index 4a2ce27cbda667..6bd893d49f021a 100644 --- a/test/integration/targets/file/aliases +++ b/test/integration/targets/file/aliases @@ -1,3 +1,2 @@ shippable/posix/group2 needs/root -skip/aix diff --git a/test/integration/targets/get_url/aliases b/test/integration/targets/get_url/aliases index f82a267ba019ea..90ef161f598809 100644 --- a/test/integration/targets/get_url/aliases +++ b/test/integration/targets/get_url/aliases @@ -1,4 +1,3 @@ destructive shippable/posix/group1 needs/httptester -skip/aix diff --git a/test/integration/targets/getent/aliases b/test/integration/targets/getent/aliases index f8e28c7e4697b2..a6dafcf8cd8b03 100644 --- a/test/integration/targets/getent/aliases +++ b/test/integration/targets/getent/aliases @@ -1,2 +1 @@ shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/git/aliases b/test/integration/targets/git/aliases index f71c8117c74784..3005e4b26d029a 100644 --- a/test/integration/targets/git/aliases +++ b/test/integration/targets/git/aliases @@ -1,2 +1 @@ shippable/posix/group4 -skip/aix diff --git a/test/integration/targets/group/aliases b/test/integration/targets/group/aliases index f8e28c7e4697b2..a6dafcf8cd8b03 100644 --- a/test/integration/targets/group/aliases +++ b/test/integration/targets/group/aliases @@ -1,2 +1 @@ shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/hostname/aliases b/test/integration/targets/hostname/aliases index c552d611499b11..6eae8bd8ddc2b5 100644 --- a/test/integration/targets/hostname/aliases +++ b/test/integration/targets/hostname/aliases @@ -1,3 +1,2 @@ shippable/posix/group1 destructive -skip/aix # currently unsupported by hostname module diff --git a/test/integration/targets/incidental_inventory_docker_swarm/aliases b/test/integration/targets/incidental_inventory_docker_swarm/aliases index 74d3befedf63a6..66362758d5fba8 100644 --- a/test/integration/targets/incidental_inventory_docker_swarm/aliases +++ b/test/integration/targets/incidental_inventory_docker_swarm/aliases @@ -3,6 +3,7 @@ context/controller skip/osx skip/macos skip/freebsd +skip/rhel/9.0b # there are no docker-ce packages for CentOS/RHEL 9 
destructive skip/docker # The tests sometimes make docker daemon unstable; hence, # we skip all docker-based CI runs to avoid disrupting diff --git a/test/integration/targets/interpreter_discovery_python/tasks/main.yml b/test/integration/targets/interpreter_discovery_python/tasks/main.yml index 770de0c591a7f7..c8b064df31f5db 100644 --- a/test/integration/targets/interpreter_discovery_python/tasks/main.yml +++ b/test/integration/targets/interpreter_discovery_python/tasks/main.yml @@ -157,8 +157,10 @@ that: # rhel 6/7 - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and distro_version is version('8','<')) or distro_version is version('8','>=') - # rhel 8+ - - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/libexec/platform-python' and distro_version is version('8','>=')) or distro_version is version('8','<') + # rhel 8 + - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/libexec/platform-python' and distro_version is version('8','==')) or distro_version is version('8','!=') + # rhel 9 + - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3' and distro_version is version('9','==')) or distro_version is version('9','!=') when: distro == 'redhat' - name: ubuntu assertions diff --git a/test/integration/targets/package/aliases b/test/integration/targets/package/aliases index 0b484bbab6aa8e..6eae8bd8ddc2b5 100644 --- a/test/integration/targets/package/aliases +++ b/test/integration/targets/package/aliases @@ -1,3 +1,2 @@ shippable/posix/group1 destructive -skip/aix diff --git a/test/integration/targets/package_facts/aliases b/test/integration/targets/package_facts/aliases index 6c62b9a752e084..738ccddee2c1f4 100644 --- a/test/integration/targets/package_facts/aliases +++ b/test/integration/targets/package_facts/aliases @@ -1,4 +1,3 @@ shippable/posix/group3 -skip/aix skip/osx skip/macos diff --git a/test/integration/targets/pip/aliases b/test/integration/targets/pip/aliases index 8d8cc50ef859b6..0d91b7de01c742 100644 --- a/test/integration/targets/pip/aliases +++ b/test/integration/targets/pip/aliases @@ -1,3 +1,2 @@ destructive shippable/posix/group5 -skip/aix diff --git a/test/integration/targets/prepare_http_tests/tasks/main.yml b/test/integration/targets/prepare_http_tests/tasks/main.yml index 9ab00221ad5cd8..8d34a3cd7b52dd 100644 --- a/test/integration/targets/prepare_http_tests/tasks/main.yml +++ b/test/integration/targets/prepare_http_tests/tasks/main.yml @@ -16,6 +16,7 @@ - include_tasks: "{{ lookup('first_found', files)}}" vars: files: + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version }}.yml" - "{{ ansible_os_family | lower }}.yml" - "default.yml" when: diff --git a/test/integration/targets/prepare_http_tests/vars/RedHat-9.yml b/test/integration/targets/prepare_http_tests/vars/RedHat-9.yml new file mode 100644 index 00000000000000..2618233cbd214d --- /dev/null +++ b/test/integration/targets/prepare_http_tests/vars/RedHat-9.yml @@ -0,0 +1,4 @@ +krb5_packages: +- krb5-devel +- krb5-workstation +- redhat-rpm-config # needed for gssapi install diff --git a/test/integration/targets/remote_tmp/aliases b/test/integration/targets/remote_tmp/aliases index 4b8559d973d70c..0418db384c16de 100644 --- a/test/integration/targets/remote_tmp/aliases +++ b/test/integration/targets/remote_tmp/aliases @@ -1,4 +1,3 @@ shippable/posix/group3 -skip/aix context/target needs/target/setup_remote_tmp_dir diff --git a/test/integration/targets/rpm_key/aliases b/test/integration/targets/rpm_key/aliases index 
3a07aab32d1fec..a4c92ef8538742 100644 --- a/test/integration/targets/rpm_key/aliases +++ b/test/integration/targets/rpm_key/aliases @@ -1,3 +1,2 @@ destructive shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/service/aliases b/test/integration/targets/service/aliases index 1ef4c3619a803e..f2f9ac9ddc4a84 100644 --- a/test/integration/targets/service/aliases +++ b/test/integration/targets/service/aliases @@ -1,5 +1,4 @@ destructive shippable/posix/group1 -skip/aix skip/osx skip/macos diff --git a/test/integration/targets/service_facts/aliases b/test/integration/targets/service_facts/aliases index cc0aa0d99ade6b..9470f7732f850b 100644 --- a/test/integration/targets/service_facts/aliases +++ b/test/integration/targets/service_facts/aliases @@ -1,5 +1,4 @@ shippable/posix/group3 -skip/aix skip/freebsd skip/osx skip/macos diff --git a/test/integration/targets/setup_paramiko/install-RedHat-9-python-3.yml b/test/integration/targets/setup_paramiko/install-RedHat-9-python-3.yml new file mode 100644 index 00000000000000..ca391556e64b6f --- /dev/null +++ b/test/integration/targets/setup_paramiko/install-RedHat-9-python-3.yml @@ -0,0 +1,9 @@ +- name: Setup remote constraints + include_tasks: setup-remote-constraints.yml +- name: Install Paramiko for Python 3 on RHEL 9 + pip: # no python3-paramiko package exists for RHEL 9 + name: paramiko + extra_args: "-c {{ remote_constraints }}" + +- name: Drop the crypto-policy to LEGACY for these tests + command: update-crypto-policies --set LEGACY diff --git a/test/integration/targets/setup_paramiko/uninstall-RedHat-9-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-RedHat-9-python-3.yml new file mode 100644 index 00000000000000..f46ec55bea3a61 --- /dev/null +++ b/test/integration/targets/setup_paramiko/uninstall-RedHat-9-python-3.yml @@ -0,0 +1,7 @@ +- name: Uninstall Paramiko for Python 3 on RHEL 9 + pip: # no python3-paramiko package exists for RHEL 9 + name: paramiko + state: absent + +- name: Revert the crypto-policy back to DEFAULT + command: update-crypto-policies --set DEFAULT diff --git a/test/integration/targets/setup_rpm_repo/tasks/main.yml b/test/integration/targets/setup_rpm_repo/tasks/main.yml index b2c9ae1ba6c620..be20078f1bd662 100644 --- a/test/integration/targets/setup_rpm_repo/tasks/main.yml +++ b/test/integration/targets/setup_rpm_repo/tasks/main.yml @@ -24,6 +24,11 @@ args: name: "{{ rpm_repo_packages }}" + - name: Install rpmfluff via pip + pip: + name: rpmfluff + when: ansible_facts.os_family == 'RedHat' and ansible_distribution_major_version is version('9', '==') + - set_fact: repos: - "fake-{{ ansible_architecture }}" diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-9.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-9.yml new file mode 100644 index 00000000000000..84849e2341c687 --- /dev/null +++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-9.yml @@ -0,0 +1,4 @@ +rpm_repo_packages: + - rpm-build + - createrepo_c + - createrepo diff --git a/test/integration/targets/subversion/aliases b/test/integration/targets/subversion/aliases index 1dd2724ef246fd..23ada3cc37ca8b 100644 --- a/test/integration/targets/subversion/aliases +++ b/test/integration/targets/subversion/aliases @@ -1,7 +1,7 @@ setup/always/setup_passlib shippable/posix/group2 -skip/aix skip/osx skip/macos +skip/rhel/9.0b # svn checkout hangs destructive needs/root diff --git a/test/integration/targets/systemd/aliases b/test/integration/targets/systemd/aliases index f8e28c7e4697b2..a6dafcf8cd8b03 100644 --- 
a/test/integration/targets/systemd/aliases +++ b/test/integration/targets/systemd/aliases @@ -1,2 +1 @@ shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/unarchive/aliases b/test/integration/targets/unarchive/aliases index db9bbd8c424942..961b20518e2967 100644 --- a/test/integration/targets/unarchive/aliases +++ b/test/integration/targets/unarchive/aliases @@ -1,4 +1,3 @@ needs/root shippable/posix/group2 destructive -skip/aix diff --git a/test/integration/targets/unsafe_writes/aliases b/test/integration/targets/unsafe_writes/aliases index cf954afc1f025e..0d8146e7dd49d4 100644 --- a/test/integration/targets/unsafe_writes/aliases +++ b/test/integration/targets/unsafe_writes/aliases @@ -3,6 +3,5 @@ needs/root skip/freebsd skip/osx skip/macos -skip/aix shippable/posix/group3 needs/target/setup_remote_tmp_dir diff --git a/test/integration/targets/uri/aliases b/test/integration/targets/uri/aliases index 11e91ee7ab3cc5..6c31bd09ca4bbe 100644 --- a/test/integration/targets/uri/aliases +++ b/test/integration/targets/uri/aliases @@ -1,4 +1,3 @@ destructive shippable/posix/group4 needs/httptester -skip/aix diff --git a/test/integration/targets/user/aliases b/test/integration/targets/user/aliases index 3a07aab32d1fec..a4c92ef8538742 100644 --- a/test/integration/targets/user/aliases +++ b/test/integration/targets/user/aliases @@ -1,3 +1,2 @@ destructive shippable/posix/group1 -skip/aix diff --git a/test/integration/targets/yum/aliases b/test/integration/targets/yum/aliases index 5aba303dc9f980..aed613834585a2 100644 --- a/test/integration/targets/yum/aliases +++ b/test/integration/targets/yum/aliases @@ -1,6 +1,5 @@ destructive shippable/posix/group4 -skip/aix skip/freebsd skip/osx skip/macos diff --git a/test/integration/targets/yum_repository/aliases b/test/integration/targets/yum_repository/aliases index 0b484bbab6aa8e..6eae8bd8ddc2b5 100644 --- a/test/integration/targets/yum_repository/aliases +++ b/test/integration/targets/yum_repository/aliases @@ -1,3 +1,2 @@ shippable/posix/group1 destructive -skip/aix diff --git a/test/lib/ansible_test/__init__.py b/test/lib/ansible_test/__init__.py index e69de29bb2d1d6..527d413a98d8f7 100644 --- a/test/lib/ansible_test/__init__.py +++ b/test/lib/ansible_test/__init__.py @@ -0,0 +1,2 @@ +# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x. +# This allows the ansible-test entry point to report supported Python versions before exiting. 
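For context on the two-line `__init__.py` above: keeping the package importable under Python 2.x lets the entry point load just enough shared code to print a supported-versions message instead of dying with a syntax error. A minimal sketch of that kind of early guard (illustrative only; the version tuple and function name here are assumptions, not the actual ansible-test entry point):

    # Sketch of an early interpreter check that stays importable under Python 2.x.
    from __future__ import print_function

    import sys

    # Illustrative list; the real supported versions are defined elsewhere in ansible-test.
    SUPPORTED_PYTHON_VERSIONS = ('2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10')


    def check_python_version():
        """Exit with a helpful message when running under an unsupported Python."""
        current = '%d.%d' % sys.version_info[:2]

        if current not in SUPPORTED_PYTHON_VERSIONS:
            print('Unsupported Python version: %s' % current, file=sys.stderr)
            print('Supported versions: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS), file=sys.stderr)
            sys.exit(1)


    if __name__ == '__main__':
        check_python_version()
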
diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt index 05d173a63f0dc6..b56d4a9368db63 100644 --- a/test/lib/ansible_test/_data/completion/docker.txt +++ b/test/lib/ansible_test/_data/completion/docker.txt @@ -1,13 +1,13 @@ -base image=quay.io/ansible/base-test-container:1.1.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined -default image=quay.io/ansible/default-test-container:4.2.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined context=collection -default image=quay.io/ansible/ansible-core-test-container:4.2.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined context=ansible-core -alpine3 image=quay.io/ansible/alpine3-test-container:3.3.0 python=3.9 -centos6 image=quay.io/ansible/centos6-test-container:3.1.0 python=2.6 seccomp=unconfined -centos7 image=quay.io/ansible/centos7-test-container:3.1.0 python=2.7 seccomp=unconfined -centos8 image=quay.io/ansible/centos8-test-container:3.1.0 python=3.6 seccomp=unconfined -fedora33 image=quay.io/ansible/fedora33-test-container:3.1.0 python=3.9 -fedora34 image=quay.io/ansible/fedora34-test-container:3.1.0 python=3.9 seccomp=unconfined -opensuse15py2 image=quay.io/ansible/opensuse15py2-test-container:3.1.0 python=2.7 -opensuse15 image=quay.io/ansible/opensuse15-test-container:3.1.0 python=3.6 -ubuntu1804 image=quay.io/ansible/ubuntu1804-test-container:3.1.0 python=3.6 seccomp=unconfined -ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:3.1.0 python=3.8 seccomp=unconfined +base image=quay.io/ansible/base-test-container:1.1.1 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 +default image=quay.io/ansible/default-test-container:4.2.1 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 context=collection +default image=quay.io/ansible/ansible-core-test-container:4.2.1 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 context=ansible-core +alpine3 image=quay.io/ansible/alpine3-test-container:3.3.1 python=3.9 cgroup=none audit=none +centos6 image=quay.io/ansible/centos6-test-container:3.1.2 python=2.6 cgroup=none +centos7 image=quay.io/ansible/centos7-test-container:3.1.1 python=2.7 cgroup=v1-only +centos8 image=quay.io/ansible/centos8-test-container:3.1.1 python=3.6 +fedora33 image=quay.io/ansible/fedora33-test-container:3.1.1 python=3.9 +fedora34 image=quay.io/ansible/fedora34-test-container:3.1.1 python=3.9 +opensuse15py2 image=quay.io/ansible/opensuse15py2-test-container:3.1.1 python=2.7 +opensuse15 image=quay.io/ansible/opensuse15-test-container:3.1.1 python=3.6 +ubuntu1804 image=quay.io/ansible/ubuntu1804-test-container:3.1.1 python=3.6 +ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:3.1.1 python=3.8 diff --git a/test/lib/ansible_test/_data/completion/network.txt b/test/lib/ansible_test/_data/completion/network.txt index 8c6243e9a1d93d..1d6b0c196a51d6 100644 --- a/test/lib/ansible_test/_data/completion/network.txt +++ b/test/lib/ansible_test/_data/completion/network.txt @@ -1,2 +1,2 @@ -ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli provider=aws -vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli provider=aws +ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli provider=aws arch=x86_64 +vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli provider=aws arch=x86_64 diff --git a/test/lib/ansible_test/_data/completion/remote.txt b/test/lib/ansible_test/_data/completion/remote.txt index 97c9a677f2c025..4e56350277c0c1 100644 --- 
a/test/lib/ansible_test/_data/completion/remote.txt +++ b/test/lib/ansible_test/_data/completion/remote.txt @@ -1,10 +1,16 @@ -freebsd/12.2 python=3.7,2.7,3.8 python_dir=/usr/local/bin provider=aws -freebsd/13.0 python=3.7,2.7,3.8,3.9 python_dir=/usr/local/bin provider=aws -freebsd python_dir=/usr/local/bin provider=aws -macos/11.1 python=3.9 python_dir=/usr/local/bin provider=parallels -macos python_dir=/usr/local/bin provider=parallels -rhel/7.9 python=2.7 provider=aws -rhel/8.4 python=3.6,3.8 provider=aws -rhel provider=aws -aix/7.2 python=2.7,3.7 python_dir=/opt/freeware/bin provider=ibmps -aix python_dir=/opt/freeware/bin provider=ibmps +alpine/3.16 python=3.10 become=doas_sudo provider=aws arch=x86_64 +alpine become=doas_sudo provider=aws arch=x86_64 +fedora/36 python=3.10 become=sudo provider=aws arch=x86_64 +fedora become=sudo provider=aws arch=x86_64 +freebsd/12.2 python=3.7,2.7,3.8 python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64 +freebsd/13.0 python=3.7,2.7,3.8,3.9 python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64 +freebsd python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64 +macos/11.1 python=3.9 python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64 +macos python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64 +rhel/7.9 python=2.7 become=sudo provider=aws arch=x86_64 +rhel/8.4 python=3.6,3.8 become=sudo provider=aws arch=x86_64 +rhel/9.0 python=3.9 become=sudo provider=aws arch=x86_64 +rhel become=sudo provider=aws arch=x86_64 +ubuntu/20.04 python=3.8,3.9 become=sudo provider=aws arch=x86_64 +ubuntu/22.04 python=3.10 become=sudo provider=aws arch=x86_64 +ubuntu become=sudo provider=aws arch=x86_64 diff --git a/test/lib/ansible_test/_data/completion/windows.txt b/test/lib/ansible_test/_data/completion/windows.txt index 94868f066b0872..767c36cbcb9837 100644 --- a/test/lib/ansible_test/_data/completion/windows.txt +++ b/test/lib/ansible_test/_data/completion/windows.txt @@ -1,5 +1,6 @@ -windows/2012 provider=aws -windows/2012-R2 provider=aws -windows/2016 provider=aws -windows/2019 provider=aws -windows/2022 provider=aws +windows/2012 provider=aws arch=x86_64 +windows/2012-R2 provider=aws arch=x86_64 +windows/2016 provider=aws arch=x86_64 +windows/2019 provider=aws arch=x86_64 +windows/2022 provider=aws arch=x86_64 +windows provider=aws arch=x86_64 diff --git a/test/lib/ansible_test/_data/requirements/constraints.txt b/test/lib/ansible_test/_data/requirements/constraints.txt index 6eda167d0e4600..652ff3102ff7f9 100644 --- a/test/lib/ansible_test/_data/requirements/constraints.txt +++ b/test/lib/ansible_test/_data/requirements/constraints.txt @@ -9,6 +9,7 @@ wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python wheel < 0.38.0 ; python_version >= '2.7' and python_version < '3.7' # wheel 0.38.0 and later require python 3.7 or later idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6 +paramiko < 2.9.0 ; python_version >= '2.7' # paramiko 2.9.0+ requires changes to the paramiko_ssh connection plugin to work with older systems pytest < 3.3.0, >= 3.1.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6 pytest < 5.0.0, >= 4.5.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7 pytest >= 4.5.0 ; python_version > '2.7' # pytest 4.5.0 added support 
for --strict-markers diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py index e604a2b35a8458..18e776ad743dec 100644 --- a/test/lib/ansible_test/_internal/__init__.py +++ b/test/lib/ansible_test/_internal/__init__.py @@ -3,6 +3,7 @@ import os import sys +import typing as t # This import should occur as early as possible. # It must occur before subprocess has been imported anywhere in the current process. @@ -10,10 +11,14 @@ CURRENT_RLIMIT_NOFILE, ) +from .constants import ( + STATUS_HOST_CONNECTION_ERROR, +) + from .util import ( ApplicationError, + HostConnectionError, display, - MAXFD, ) from .delegation import ( @@ -57,16 +62,18 @@ def main(): display.truncate = config.truncate display.redact = config.redact display.color = config.color - display.info_stderr = config.info_stderr + display.fd = sys.stderr if config.display_stderr else sys.stdout configure_timeout(config) display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2) - display.info('MAXFD: %d' % MAXFD, verbosity=2) delegate_args = None target_names = None try: + if config.check_layout: + data_context().check_layout() + args.func(config) except PrimeContainers: pass @@ -78,20 +85,23 @@ def main(): delegate_args = (ex.host_state, ex.exclude, ex.require) if delegate_args: - # noinspection PyTypeChecker delegate(config, *delegate_args) if target_names: for target_name in target_names: - print(target_name) # info goes to stderr, this should be on stdout + print(target_name) # display goes to stderr, this should be on stdout display.review_warnings() config.success = True + except HostConnectionError as ex: + display.fatal(str(ex)) + ex.run_callback() + sys.exit(STATUS_HOST_CONNECTION_ERROR) except ApplicationWarning as ex: display.warning(u'%s' % ex) sys.exit(0) except ApplicationError as ex: - display.error(u'%s' % ex) + display.fatal(u'%s' % ex) sys.exit(1) except KeyboardInterrupt: sys.exit(2) diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py index 5c689bed484e85..0f250403857946 100644 --- a/test/lib/ansible_test/_internal/ansible_util.py +++ b/test/lib/ansible_test/_internal/ansible_util.py @@ -22,11 +22,11 @@ ANSIBLE_SOURCE_ROOT, ANSIBLE_TEST_TOOLS_ROOT, get_ansible_version, + raw_command, ) from .util_common import ( create_temp_dir, - run_command, ResultType, intercept_python, get_injector_path, @@ -51,6 +51,10 @@ PythonConfig, ) +from .thread import ( + mutex, +) + def parse_inventory(args, inventory_path): # type: (EnvironmentConfig, str) -> t.Dict[str, t.Any] """Return a dict parsed from the given inventory file.""" @@ -193,13 +197,14 @@ def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str] return env +@mutex def get_ansible_python_path(args): # type: (CommonConfig) -> str """ Return a directory usable for PYTHONPATH, containing only the ansible package. If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit. 
""" try: - return get_ansible_python_path.python_path + return get_ansible_python_path.python_path # type: ignore[attr-defined] except AttributeError: pass @@ -217,7 +222,7 @@ def get_ansible_python_path(args): # type: (CommonConfig) -> str if not args.explain: generate_egg_info(python_path) - get_ansible_python_path.python_path = python_path + get_ansible_python_path.python_path = python_path # type: ignore[attr-defined] return python_path @@ -259,12 +264,12 @@ def __init__(self, reason): # type: (str) -> None self.reason = reason -def get_collection_detail(args, python): # type: (EnvironmentConfig, PythonConfig) -> CollectionDetail +def get_collection_detail(python): # type: (PythonConfig) -> CollectionDetail """Return collection detail.""" collection = data_context().content.collection directory = os.path.join(collection.root, collection.directory) - stdout = run_command(args, [python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0] + stdout = raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True)[0] result = json.loads(stdout) error = result.get('error') @@ -283,15 +288,15 @@ def run_playbook( args, # type: EnvironmentConfig inventory_path, # type: str playbook, # type: str - run_playbook_vars=None, # type: t.Optional[t.Dict[str, t.Any]] - capture=False, # type: bool + capture, # type: bool + variables=None, # type: t.Optional[t.Dict[str, t.Any]] ): # type: (...) -> None """Run the specified playbook using the given inventory file and playbook variables.""" playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook) cmd = ['ansible-playbook', '-i', inventory_path, playbook_path] - if run_playbook_vars: - cmd.extend(['-e', json.dumps(run_playbook_vars)]) + if variables: + cmd.extend(['-e', json.dumps(variables)]) if args.verbosity: cmd.append('-%s' % ('v' * args.verbosity)) diff --git a/test/lib/ansible_test/_internal/become.py b/test/lib/ansible_test/_internal/become.py index dc0a208a62b6b4..5a5506a14e0d7b 100644 --- a/test/lib/ansible_test/_internal/become.py +++ b/test/lib/ansible_test/_internal/become.py @@ -5,9 +5,18 @@ import shlex import typing as t +from .util import ( + get_subclasses, +) + class Become(metaclass=abc.ABCMeta): """Base class for become implementations.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return cls.__name__.lower() + @property @abc.abstractmethod def method(self): # type: () -> str @@ -18,6 +27,38 @@ def prepare_command(self, command): # type: (t.List[str]) -> t.List[str] """Return the given command, if any, with privilege escalation.""" +class Doas(Become): + """Become using 'doas'.""" + @property + def method(self): # type: () -> str + """The name of the Ansible become plugin that is equivalent to this.""" + raise NotImplementedError('Ansible has no built-in doas become plugin.') + + def prepare_command(self, command): # type: (t.List[str]) -> t.List[str] + """Return the given command, if any, with privilege escalation.""" + become = ['doas', '-n'] + + if command: + become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)]) + else: + become.extend(['-s']) + + return become + + +class DoasSudo(Doas): + """Become using 'doas' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return 'doas_sudo' + + @property + def method(self): # type: () -> str + """The name of the Ansible become 
plugin that is equivalent to this.""" + return 'sudo' + + class Su(Become): """Become using 'su'.""" @property @@ -35,6 +76,19 @@ def prepare_command(self, command): # type: (t.List[str]) -> t.List[str] return become +class SuSudo(Su): + """Become using 'su' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands.""" + @classmethod + def name(cls): + """The name of this plugin.""" + return 'su_sudo' + + @property + def method(self): # type: () -> str + """The name of the Ansible become plugin that is equivalent to this.""" + return 'sudo' + + class Sudo(Become): """Become using 'sudo'.""" @property @@ -50,3 +104,6 @@ def prepare_command(self, command): # type: (t.List[str]) -> t.List[str] become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)]) return become + + +SUPPORTED_BECOME_METHODS = {cls.name(): cls for cls in get_subclasses(Become)} diff --git a/test/lib/ansible_test/_internal/bootstrap.py b/test/lib/ansible_test/_internal/bootstrap.py index 9eb26de7d2ec38..326973978a9a28 100644 --- a/test/lib/ansible_test/_internal/bootstrap.py +++ b/test/lib/ansible_test/_internal/bootstrap.py @@ -35,8 +35,8 @@ def bootstrap_type(self): # type: () -> str """The bootstrap type to pass to the bootstrapping script.""" return self.__class__.__name__.replace('Bootstrap', '').lower() - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', @@ -65,8 +65,8 @@ def get_script(self): # type: () -> str @dataclasses.dataclass class BootstrapDocker(Bootstrap): """Bootstrap docker instances.""" - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" variables = super().get_variables() variables.update( @@ -83,8 +83,8 @@ class BootstrapRemote(Bootstrap): platform: str platform_version: str - def get_variables(self): # type: () -> t.Dict[str, str] - """The variables to template in the boostrapping script.""" + def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] + """The variables to template in the bootstrapping script.""" variables = super().get_variables() variables.update( diff --git a/test/lib/ansible_test/_internal/cgroup.py b/test/lib/ansible_test/_internal/cgroup.py new file mode 100644 index 00000000000000..52779599fc1311 --- /dev/null +++ b/test/lib/ansible_test/_internal/cgroup.py @@ -0,0 +1,110 @@ +"""Linux control group constants, classes and utilities.""" +from __future__ import annotations + +import codecs +import dataclasses +import pathlib +import re + + +class CGroupPath: + """Linux cgroup path constants.""" + ROOT = '/sys/fs/cgroup' + SYSTEMD = '/sys/fs/cgroup/systemd' + SYSTEMD_RELEASE_AGENT = '/sys/fs/cgroup/systemd/release_agent' + + +class MountType: + """Linux filesystem mount type constants.""" + TMPFS = 'tmpfs' + CGROUP_V1 = 'cgroup' + CGROUP_V2 = 'cgroup2' + + +@dataclasses.dataclass(frozen=True) +class CGroupEntry: + """A single cgroup entry parsed from '/proc/{pid}/cgroup' in the proc filesystem.""" + id: int + subsystem: str + path: pathlib.PurePosixPath + + @property + def root_path(self): + """The root path for this 
cgroup subsystem.""" + return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem) + + @property + def full_path(self) -> pathlib.PurePosixPath: + """The full path for this cgroup subsystem.""" + return pathlib.PurePosixPath(self.root_path, str(self.path).lstrip('/')) + + @classmethod + def parse(cls, value: str) -> CGroupEntry: + """Parse the given cgroup line from the proc filesystem and return a cgroup entry.""" + cid, subsystem, path = value.split(':') + + return cls( + id=int(cid), + subsystem=re.sub('^name=', '', subsystem), + path=pathlib.PurePosixPath(path) + ) + + @classmethod + def loads(cls, value: str) -> tuple[CGroupEntry, ...]: + """Parse the given output from the proc filesystem and return a tuple of cgroup entries.""" + return tuple(cls.parse(line) for line in value.splitlines()) + + +@dataclasses.dataclass(frozen=True) +class MountEntry: + """A single mount info entry parsed from '/proc/{pid}/mountinfo' in the proc filesystem.""" + mount_id: int + parent_id: int + device_major: int + device_minor: int + root: pathlib.PurePosixPath + path: pathlib.PurePosixPath + options: tuple[str, ...] + fields: tuple[str, ...] + type: str + source: pathlib.PurePosixPath + super_options: tuple[str, ...] + + @classmethod + def parse(cls, value: str) -> MountEntry: + """Parse the given mount info line from the proc filesystem and return a mount entry.""" + # See: https://man7.org/linux/man-pages/man5/proc.5.html + # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L135 + mount_id, parent_id, device_major_minor, root, path, options, *remainder = value.split(' ') + fields = remainder[:-4] + separator, mtype, source, super_options = remainder[-4:] + + assert separator == '-' + + device_major, device_minor = device_major_minor.split(':') + + return cls( + mount_id=int(mount_id), + parent_id=int(parent_id), + device_major=int(device_major), + device_minor=int(device_minor), + root=_decode_path(root), + path=_decode_path(path), + options=tuple(options.split(',')), + fields=tuple(fields), + type=mtype, + source=_decode_path(source), + super_options=tuple(super_options.split(',')), + ) + + @classmethod + def loads(cls, value: str) -> tuple[MountEntry, ...]: + """Parse the given output from the proc filesystem and return a tuple of mount info entries.""" + return tuple(cls.parse(line) for line in value.splitlines()) + + +def _decode_path(value: str) -> pathlib.PurePosixPath: + """Decode and return a path which may contain octal escape sequences.""" + # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L150 + path = re.sub(r'(\\[0-7]{3})', lambda m: codecs.decode(m.group(0).encode('ascii'), 'unicode_escape'), value) + return pathlib.PurePosixPath(path) diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py index db5ca501f4a628..3d0f79e83af7c0 100644 --- a/test/lib/ansible_test/_internal/ci/__init__.py +++ b/test/lib/ansible_test/_internal/ci/__init__.py @@ -114,7 +114,7 @@ def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None def initialize_private_key(self): # type: () -> str """ Initialize and publish a new key pair (if needed) and return the private key. - The private key is cached across ansible-test invocations so it is only generated and published once per CI job. + The private key is cached across ansible-test invocations, so it is only generated and published once per CI job. 
""" path = os.path.expanduser('~/.ansible-core-ci-private.key') @@ -166,14 +166,12 @@ def generate_private_key(self): # type: () -> str private_key = ec.generate_private_key(ec.SECP384R1(), default_backend()) public_key = private_key.public_key() - # noinspection PyUnresolvedReferences - private_key_pem = to_text(private_key.private_bytes( + private_key_pem = to_text(private_key.private_bytes( # type: ignore[attr-defined] # documented method, but missing from type stubs encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), )) - # noinspection PyTypeChecker public_key_pem = to_text(public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo, diff --git a/test/lib/ansible_test/_internal/classification/__init__.py b/test/lib/ansible_test/_internal/classification/__init__.py index 532fa680e8bbd8..c599d36edf0af6 100644 --- a/test/lib/ansible_test/_internal/classification/__init__.py +++ b/test/lib/ansible_test/_internal/classification/__init__.py @@ -15,6 +15,7 @@ walk_sanity_targets, load_integration_prefixes, analyze_integration_target_dependencies, + IntegrationTarget, ) from ..util import ( @@ -63,14 +64,14 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, 'integration': set(), 'windows-integration': set(), 'network-integration': set(), - } + } # type: t.Dict[str, t.Set[str]] focused_commands = collections.defaultdict(set) - deleted_paths = set() - original_paths = set() - additional_paths = set() - no_integration_paths = set() + deleted_paths = set() # type: t.Set[str] + original_paths = set() # type: t.Set[str] + additional_paths = set() # type: t.Set[str] + no_integration_paths = set() # type: t.Set[str] for path in paths: if not os.path.exists(path): @@ -110,7 +111,7 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, tests = all_tests(args) # not categorized, run all tests display.warning('Path not categorized: %s' % path) else: - focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths + focused_target = bool(tests.pop(FOCUSED_TARGET, None)) and path in original_paths tests = dict((key, value) for key, value in tests.items() if value) @@ -155,18 +156,18 @@ def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, if any(target == 'all' for target in targets): commands[command] = {'all'} - commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets) + sorted_commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets) focused_commands = dict((c, sorted(targets)) for c, targets in focused_commands.items()) - for command, targets in commands.items(): + for command, targets in sorted_commands.items(): if targets == ['all']: - commands[command] = [] # changes require testing all targets, do not filter targets + sorted_commands[command] = [] # changes require testing all targets, do not filter targets changes = ChangeDescription() changes.command = verbose_command changes.changed_paths = sorted(original_paths) changes.deleted_paths = sorted(deleted_paths) - changes.regular_command_targets = commands + changes.regular_command_targets = sorted_commands changes.focused_command_targets = focused_commands changes.no_integration_paths = sorted(no_integration_paths) @@ -205,11 +206,11 @@ def __init__(self, args): # type: (TestConfig) -> None self.prefixes = load_integration_prefixes() self.integration_dependencies = 
analyze_integration_target_dependencies(self.integration_targets) - self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed - self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed - self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed + self.python_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed + self.powershell_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed + self.csharp_module_utils_imports = {} # type: t.Dict[str, t.Set[str]] # populated on first use to reduce overhead when not needed - self.paths_to_dependent_targets = {} + self.paths_to_dependent_targets = {} # type: t.Dict[str, t.Set[IntegrationTarget]] for target in self.integration_targets: for path in target.needs_file: @@ -341,7 +342,7 @@ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] filename = os.path.basename(path) name, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if os.path.sep not in path: if filename in ( @@ -372,7 +373,7 @@ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] 'integration': target.name if 'posix/' in target.aliases else None, 'windows-integration': target.name if 'windows/' in target.aliases else None, 'network-integration': target.name if 'network/' in target.aliases else None, - FOCUSED_TARGET: True, + FOCUSED_TARGET: target.name, } if is_subdir(path, data_context().content.integration_path): @@ -430,7 +431,7 @@ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None, 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None, 'network-integration': self.network_integration_by_module.get(module_name), - FOCUSED_TARGET: True, + FOCUSED_TARGET: module_name, } return minimal @@ -582,7 +583,7 @@ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]] 'windows-integration': target.name if target and 'windows/' in target.aliases else None, 'network-integration': target.name if target and 'network/' in target.aliases else None, 'units': units_path, - FOCUSED_TARGET: target is not None, + FOCUSED_TARGET: target.name if target else None, } if is_subdir(path, data_context().content.plugin_paths['filter']): @@ -630,7 +631,7 @@ def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, s filename = os.path.basename(path) dummy, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if path.startswith('changelogs/'): return minimal @@ -674,7 +675,7 @@ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str] filename = os.path.basename(path) name, ext = os.path.splitext(filename) - minimal = {} + minimal = {} # type: t.Dict[str, str] if path.startswith('bin/'): return all_tests(self.args) # broad impact, run all tests @@ -721,7 +722,6 @@ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str] if path.startswith('test/lib/ansible_test/config/'): if name.startswith('cloud-config-'): - # noinspection PyTypeChecker cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0] if cloud_target in self.integration_targets_by_alias: @@ -746,13 +746,13 @@ def 
_classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str] if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'): return { 'sanity': 'all', # test infrastructure, run all sanity checks - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_internal/commands/units/'): return { 'units': 'all', # test infrastructure, run all unit tests - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_data/requirements/'): @@ -776,13 +776,13 @@ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str] if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'): return { 'sanity': 'all', # test infrastructure, run all sanity checks - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/ansible_test/_util/target/pytest/'): return { 'units': 'all', # test infrastructure, run all unit tests - 'integration': 'ansible-test', # run ansible-test self tests + 'integration': 'ansible-test/', # run ansible-test self tests } if path.startswith('test/lib/'): diff --git a/test/lib/ansible_test/_internal/classification/powershell.py b/test/lib/ansible_test/_internal/classification/powershell.py index 72715de00b52f8..bc73b7487c005f 100644 --- a/test/lib/ansible_test/_internal/classification/powershell.py +++ b/test/lib/ansible_test/_internal/classification/powershell.py @@ -83,7 +83,7 @@ def extract_powershell_module_utils_imports(path, module_utils): # type: (str, for line in lines: line_number += 1 - match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line) + match = re.search(r'(?i)^#\s*(?:requires\s+-modules?|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line) if not match: continue diff --git a/test/lib/ansible_test/_internal/classification/python.py b/test/lib/ansible_test/_internal/classification/python.py index ac2d99a7563bf9..d81b459b24c3d4 100644 --- a/test/lib/ansible_test/_internal/classification/python.py +++ b/test/lib/ansible_test/_internal/classification/python.py @@ -236,7 +236,7 @@ class ModuleUtilFinder(ast.NodeVisitor): def __init__(self, path, module_utils): # type: (str, t.Set[str]) -> None self.path = path self.module_utils = module_utils - self.imports = set() + self.imports = set() # type: t.Set[str] # implicitly import parent package @@ -277,7 +277,6 @@ def __init__(self, path, module_utils): # type: (str, t.Set[str]) -> None # While that will usually be true, there are exceptions which will result in this resolution being incorrect. 
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path)) - # noinspection PyPep8Naming # pylint: disable=locally-disabled, invalid-name def visit_Import(self, node): # type: (ast.Import) -> None """Visit an import node.""" @@ -287,7 +286,6 @@ def visit_Import(self, node): # type: (ast.Import) -> None # import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE] self.add_imports([alias.name for alias in node.names], node.lineno) - # noinspection PyPep8Naming # pylint: disable=locally-disabled, invalid-name def visit_ImportFrom(self, node): # type: (ast.ImportFrom) -> None """Visit an import from node.""" diff --git a/test/lib/ansible_test/_internal/cli/__init__.py b/test/lib/ansible_test/_internal/cli/__init__.py index 21c45b6e328382..dad678beb36399 100644 --- a/test/lib/ansible_test/_internal/cli/__init__.py +++ b/test/lib/ansible_test/_internal/cli/__init__.py @@ -13,23 +13,26 @@ do_commands, ) +from .epilog import ( + get_epilog, +) from .compat import ( HostSettings, convert_legacy_args, ) +from ..util import ( + get_ansible_version, +) + def parse_args(): # type: () -> argparse.Namespace """Parse command line arguments.""" completer = CompositeActionCompletionFinder() - if completer.enabled: - epilog = 'Tab completion available using the "argcomplete" python package.' - else: - epilog = 'Install the "argcomplete" python package to enable tab completion.' - - parser = argparse.ArgumentParser(epilog=epilog) + parser = argparse.ArgumentParser(prog='ansible-test', epilog=get_epilog(completer), formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('--version', action='version', version=f'%(prog)s version {get_ansible_version()}') do_commands(parser, completer) diff --git a/test/lib/ansible_test/_internal/cli/argparsing/__init__.py b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py index 8a087ebf8f8fa2..66dfc4e4a0fab7 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/__init__.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py @@ -37,7 +37,7 @@ class RegisteredCompletionFinder(OptionCompletionFinder): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.registered_completions = None # type: t.Optional[str] + self.registered_completions = None # type: t.Optional[t.List[str]] def completer( self, @@ -88,20 +88,18 @@ class CompositeAction(argparse.Action, metaclass=abc.ABCMeta): """Base class for actions that parse composite arguments.""" documentation_state = {} # type: t.Dict[t.Type[CompositeAction], DocumentationState] - # noinspection PyUnusedLocal def __init__( self, *args, - dest, # type: str **kwargs, ): - del dest - self.definition = self.create_parser() self.documentation_state[type(self)] = documentation_state = DocumentationState() self.definition.document(documentation_state) - super().__init__(*args, dest=self.definition.dest, **kwargs) + kwargs.update(dest=self.definition.dest) + + super().__init__(*args, **kwargs) register_safe_action(type(self)) @@ -139,10 +137,12 @@ class CompositeActionCompletionFinder(RegisteredCompletionFinder): def get_completions( self, prefix, # type: str - action, # type: CompositeAction + action, # type: argparse.Action parsed_args, # type: argparse.Namespace ): # type: (...) 
-> t.List[str] """Return a list of completions appropriate for the given prefix and action, taking into account the arguments that have already been parsed.""" + assert isinstance(action, CompositeAction) + state = ParserState( mode=ParserMode.LIST if self.list_mode else ParserMode.COMPLETE, remainder=prefix, @@ -238,6 +238,8 @@ def complete( """Perform argument completion using the given completer and return the completion result.""" value = state.remainder + answer: Completion + try: completer.parse(state) raise ParserError('completion expected') diff --git a/test/lib/ansible_test/_internal/cli/argparsing/actions.py b/test/lib/ansible_test/_internal/cli/argparsing/actions.py index c2b573e6397a10..e3d0fd1c7518b6 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/actions.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/actions.py @@ -7,8 +7,8 @@ class EnumAction(argparse.Action): - """Parse an enum using the lowercases enum names.""" - def __init__(self, **kwargs): # type: (t.Dict[str, t.Any]) -> None + """Parse an enum using the lowercase enum names.""" + def __init__(self, **kwargs: t.Any) -> None: self.enum_type = kwargs.pop('type', None) # type: t.Type[enum.Enum] kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type)) super().__init__(**kwargs) diff --git a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py index fe80a68e5d0312..dcff978c9a89ba 100644 --- a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py +++ b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py @@ -173,7 +173,7 @@ def set_namespace(self, namespace): # type: (t.Any) -> None self.namespaces.append(namespace) @contextlib.contextmanager - def delimit(self, delimiters, required=True): # type: (str, bool) -> t.ContextManager[ParserBoundary] + def delimit(self, delimiters, required=True): # type: (str, bool) -> t.Iterator[ParserBoundary] """Context manager for delimiting parsing of input.""" boundary = ParserBoundary(delimiters=delimiters, required=required) @@ -286,6 +286,19 @@ def document(self, state): # type: (DocumentationState) -> t.Optional[str] return '|'.join(self.choices) +class EnumValueChoicesParser(ChoicesParser): + """Composite argument parser which relies on a static list of choices derived from the values of an enum.""" + def __init__(self, enum_type: t.Type[enum.Enum], conditions: MatchConditions = MatchConditions.CHOICE) -> None: + self.enum_type = enum_type + + super().__init__(choices=[str(item.value) for item in enum_type], conditions=conditions) + + def parse(self, state: ParserState) -> t.Any: + """Parse the input from the given state and return the result.""" + value = super().parse(state) + return self.enum_type(value) + + class IntegerParser(DynamicChoicesParser): """Composite argument parser for integers.""" PATTERN = re.compile('^[1-9][0-9]*$') @@ -394,7 +407,7 @@ def parse(self, state): # type: (ParserState) -> str else: path = '' - with state.delimit(PATH_DELIMITER, required=False) as boundary: + with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: directory = path or '.' 
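A note on the annotation changes in this file: `delimit()` is implemented as a generator wrapped by `contextlib.contextmanager`, so its declared return type is `t.Iterator[ParserBoundary]` rather than `t.ContextManager[ParserBoundary]`, and the `# type: ParserBoundary` comments on the `with` statements spell out the yielded type for the type checker. A stripped-down sketch of the pattern (simplified names; not the actual parser implementation):

    import contextlib
    import typing as t


    class ParserBoundary:
        """Records which delimiter, if any, ended a delimited parse."""
        def __init__(self):  # type: () -> None
            self.match = None  # type: t.Optional[str]
            self.ready = True


    @contextlib.contextmanager
    def delimit(delimiters, required=True):  # type: (str, bool) -> t.Iterator[ParserBoundary]
        """Yield a boundary object for the duration of a delimited parse."""
        boundary = ParserBoundary()
        yield boundary
        # After the with-block, callers inspect boundary.match to see how parsing ended.

Usage then matches the call sites above: `with delimit(',') as boundary:` followed by a loop on `boundary.ready`.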
@@ -420,7 +433,7 @@ def parse(self, state): # type: (ParserState) -> t.Any """Parse the input from the given state and return the result.""" path = '' - with state.delimit(PATH_DELIMITER, required=False) as boundary: + with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: if path: path += AnyParser(nothing=True).parse(state) @@ -506,7 +519,7 @@ def parse(self, state): # type: (ParserState) -> t.Any parsers = self.get_parsers(state) keys = list(parsers) - with state.delimit(PAIR_DELIMITER, required=False) as pair: + with state.delimit(PAIR_DELIMITER, required=False) as pair: # type: ParserBoundary while pair.ready: with state.delimit(ASSIGNMENT_DELIMITER): key = ChoicesParser(keys).parse(state) @@ -528,7 +541,7 @@ def parse(self, state): # type: (ParserState) -> t.Any state.set_namespace(namespace) - with state.delimit(self.delimiter, self.required) as boundary: + with state.delimit(self.delimiter, self.required) as boundary: # type: ParserBoundary choice = self.get_left_parser(state).parse(state) if boundary.match: diff --git a/test/lib/ansible_test/_internal/cli/commands/__init__.py b/test/lib/ansible_test/_internal/cli/commands/__init__.py index 5cd37f4f915bc7..81bb465372f017 100644 --- a/test/lib/ansible_test/_internal/cli/commands/__init__.py +++ b/test/lib/ansible_test/_internal/cli/commands/__init__.py @@ -11,6 +11,7 @@ from ..completers import ( complete_target, + register_completer, ) from ..environments import ( @@ -110,33 +111,33 @@ def do_commands( testing = test.add_argument_group(title='common testing arguments') - testing.add_argument( + register_completer(testing.add_argument( 'include', metavar='TARGET', nargs='*', help='test the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--include', metavar='TARGET', action='append', help='include the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--exclude', metavar='TARGET', action='append', help='exclude the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) - testing.add_argument( + register_completer(testing.add_argument( '--require', metavar='TARGET', action='append', help='require the specified target', - ).completer = functools.partial(complete_target, completer) + ), functools.partial(complete_target, completer)) testing.add_argument( '--coverage', diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py index f79fb1cfc28bb1..7ef28919a4ef76 100644 --- a/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py +++ b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py @@ -5,6 +5,7 @@ from ...completers import ( complete_target, + register_completer, ) from ...environments import ( @@ -43,12 +44,12 @@ def do_integration( def add_integration_common( parser, # type: argparse.ArgumentParser ): - """Add common integration argumetns.""" - parser.add_argument( + """Add common integration arguments.""" + register_completer(parser.add_argument( '--start-at', metavar='TARGET', help='start at the specified target', - ).completer = complete_target + ), complete_target) 
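# Why the register_completer() conversions in the hunk above: argcomplete
# discovers completers through a `completer` attribute on argparse actions,
# but argparse.Action declares no such attribute, so each bare
# `.completer = ...` assignment needs its own type-checker suppression.
# Funneling the assignment through one helper (defined later in this diff,
# in cli/completers.py) leaves a single `type: ignore` in the codebase.
# A minimal sketch, with an illustrative completer that is not part of the PR:
import argparse
import typing as t


def register_completer(action: argparse.Action, completer: t.Callable[..., t.List[str]]) -> None:
    """Attach an argcomplete-style completer to the given action."""
    action.completer = completer  # type: ignore[attr-defined]  # attribute is an argcomplete convention


def complete_colors(prefix: str, **_) -> t.List[str]:  # hypothetical example completer
    return [color for color in ('red', 'green', 'blue') if color.startswith(prefix)]


parser = argparse.ArgumentParser()
register_completer(parser.add_argument('--color'), complete_colors)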
parser.add_argument( '--start-at-task', diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/network.py b/test/lib/ansible_test/_internal/cli/commands/integration/network.py index d070afda9b0212..86729195b0f70e 100644 --- a/test/lib/ansible_test/_internal/cli/commands/integration/network.py +++ b/test/lib/ansible_test/_internal/cli/commands/integration/network.py @@ -28,6 +28,10 @@ add_environments, ) +from ...completers import ( + register_completer, +) + def do_network_integration( subparsers, @@ -51,16 +55,16 @@ def do_network_integration( add_integration_common(network_integration) - network_integration.add_argument( + register_completer(network_integration.add_argument( '--testcase', metavar='TESTCASE', help='limit a test to a specified testcase', - ).completer = complete_network_testcase + ), complete_network_testcase) add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NETWORK_INTEGRATION) # network-integration -def complete_network_testcase(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_testcase(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of test cases matching the given prefix if only one target was parsed from the command line, otherwise return an empty list.""" testcases = [] diff --git a/test/lib/ansible_test/_internal/cli/commands/shell.py b/test/lib/ansible_test/_internal/cli/commands/shell.py index 301ff70e905eb5..7d52b39e058cb2 100644 --- a/test/lib/ansible_test/_internal/cli/commands/shell.py +++ b/test/lib/ansible_test/_internal/cli/commands/shell.py @@ -38,10 +38,22 @@ def do_shell( shell = parser.add_argument_group(title='shell arguments') + shell.add_argument( + 'cmd', + nargs='*', + help='run the specified command', + ) + shell.add_argument( '--raw', action='store_true', help='direct to shell with no setup', ) + shell.add_argument( + '--export', + metavar='PATH', + help='export inventory instead of opening a shell', + ) + add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SHELL) # shell diff --git a/test/lib/ansible_test/_internal/cli/compat.py b/test/lib/ansible_test/_internal/cli/compat.py index 2090aac711ba5e..0a23c2306f3875 100644 --- a/test/lib/ansible_test/_internal/cli/compat.py +++ b/test/lib/ansible_test/_internal/cli/compat.py @@ -55,7 +55,7 @@ ) -def filter_python(version, versions): # type: (t.Optional[str], t.Optional[t.List[str]]) -> t.Optional[str] +def filter_python(version, versions): # type: (t.Optional[str], t.Optional[t.Sequence[str]]) -> t.Optional[str] """If a Python version is given and is in the given version list, return that Python version, otherwise return None.""" return version if version in versions else None @@ -115,6 +115,7 @@ class LegacyHostOptions: venv_system_site_packages: t.Optional[bool] = None remote: t.Optional[str] = None remote_provider: t.Optional[str] = None + remote_arch: t.Optional[str] = None docker: t.Optional[str] = None docker_privileged: t.Optional[bool] = None docker_seccomp: t.Optional[str] = None @@ -201,6 +202,9 @@ def convert_legacy_args( '--controller', '--target', '--target-python', + '--target-posix', + '--target-windows', + '--target-network', ] used_old_options = old_options.get_options_used() @@ -237,8 +241,8 @@ def convert_legacy_args( args.targets = targets if used_default_pythons: - targets = t.cast(t.List[ControllerConfig], targets) - skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in 
targets})) + control_targets = t.cast(t.List[ControllerConfig], targets) + skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in control_targets})) else: skipped_python_versions = [] @@ -260,10 +264,12 @@ def controller_targets( mode, # type: TargetMode options, # type: LegacyHostOptions controller, # type: ControllerHostConfig -): # type: (...) -> t.List[ControllerConfig] +): # type: (...) -> t.List[HostConfig] """Return the configuration for controller targets.""" python = native_python(options) + targets: t.List[HostConfig] + if python: targets = [ControllerConfig(python=python)] else: @@ -283,7 +289,7 @@ def native_python(options): # type: (LegacyHostOptions) -> t.Optional[NativePyt def get_legacy_host_config( mode, # type: TargetMode options, # type: LegacyHostOptions -): # type: (...) -> t.Tuple[HostConfig, t.List[HostConfig], t.Optional[FallbackDetail]] +): # type: (...) -> t.Tuple[ControllerHostConfig, t.List[HostConfig], t.Optional[FallbackDetail]] """ Returns controller and target host configs derived from the provided legacy host options. The goal is to match the original behavior, by using non-split testing whenever possible. @@ -296,6 +302,9 @@ def get_legacy_host_config( controller_fallback = None # type: t.Optional[t.Tuple[str, str, FallbackReason]] + controller: t.Optional[ControllerHostConfig] + targets: t.List[HostConfig] + if options.venv: if controller_python(options.python) or not options.python: controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages)) @@ -304,14 +313,21 @@ def get_legacy_host_config( controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages)) if mode in (TargetMode.SANITY, TargetMode.UNITS): - targets = controller_targets(mode, options, controller) + python = native_python(options) + + if python: + control_targets = [ControllerConfig(python=python)] + else: + control_targets = controller.get_default_targets(HostContext(controller_config=controller)) # Target sanity tests either have no Python requirements or manage their own virtual environments. - # Thus there is no point in setting up virtual environments ahead of time for them. + # Thus, there is no point in setting up virtual environments ahead of time for them. 
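# Why this hunk renames lists to control_targets and wraps them in
# t.cast(...): typing.List is invariant under mypy, so a
# t.List[ControllerConfig] cannot be assigned where t.List[HostConfig] is
# expected, even though every ControllerConfig is a HostConfig. The same
# reasoning motivates this PR's t.List -> t.Sequence parameter changes,
# since Sequence is covariant. A minimal sketch with stand-in classes
# (not the PR's real host config hierarchy):
import typing as t


class HostConfig: ...
class ControllerConfig(HostConfig): ...


def make_control_targets() -> t.List[ControllerConfig]:
    return [ControllerConfig()]


targets: t.List[HostConfig]
# targets = make_control_targets()  # rejected by mypy: List is invariant
targets = t.cast(t.List[HostConfig], make_control_targets())  # accepted, no runtime cost


def count_hosts(hosts: t.Sequence[HostConfig]) -> int:
    """Read-only Sequence parameters avoid the cast entirely."""
    return len(hosts)


count_hosts(make_control_targets())  # fine without a cast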
if mode == TargetMode.UNITS: targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path, - system_site_packages=options.venv_system_site_packages)) for target in targets] + system_site_packages=options.venv_system_site_packages)) for target in control_targets] + else: + targets = t.cast(t.List[HostConfig], control_targets) else: targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))] @@ -359,33 +375,34 @@ def get_legacy_host_config( if remote_config.controller_supported: if controller_python(options.python) or not options.python: - controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, + arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON - controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT controller = None - targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)] + targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)] elif mode == TargetMode.SHELL and options.remote.startswith('windows/'): if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS: raise ControllerNotSupportedError(f'--python {options.python}') controller = OriginConfig(python=native_python(options)) - targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider)] + targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)] else: if not options.python: raise PythonVersionUnspecifiedError(f'--remote {options.remote}') if controller_python(options.python): - controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider) + controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch) targets = controller_targets(mode, options, controller) else: context, reason = f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON controller = None - targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)] + targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)] if not controller: if docker_available(): @@ -443,22 +460,25 @@ def handle_non_posix_targets( """Return a list of non-POSIX targets if the target mode is non-POSIX.""" if mode == TargetMode.WINDOWS_INTEGRATION: if options.windows: - targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider) for version in options.windows] + targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider, arch=options.remote_arch) + for version in 
options.windows] else: targets = [WindowsInventoryConfig(path=options.inventory)] elif mode == TargetMode.NETWORK_INTEGRATION: if options.platform: - targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider) for platform in options.platform] + network_targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider, arch=options.remote_arch) for platform in options.platform] for platform, collection in options.platform_collection or []: - for entry in targets: + for entry in network_targets: if entry.platform == platform: entry.collection = collection for platform, connection in options.platform_connection or []: - for entry in targets: + for entry in network_targets: if entry.platform == platform: entry.connection = connection + + targets = t.cast(t.List[HostConfig], network_targets) else: targets = [NetworkInventoryConfig(path=options.inventory)] @@ -470,12 +490,14 @@ def default_targets( controller, # type: ControllerHostConfig ): # type: (...) -> t.List[HostConfig] """Return a list of default targets for the given target mode.""" + targets: t.List[HostConfig] + if mode == TargetMode.WINDOWS_INTEGRATION: targets = [WindowsInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.winrm')))] elif mode == TargetMode.NETWORK_INTEGRATION: targets = [NetworkInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.networking')))] elif mode.multiple_pythons: - targets = controller.get_default_targets(HostContext(controller_config=controller)) + targets = t.cast(t.List[HostConfig], controller.get_default_targets(HostContext(controller_config=controller))) else: targets = [ControllerConfig()] diff --git a/test/lib/ansible_test/_internal/cli/completers.py b/test/lib/ansible_test/_internal/cli/completers.py index a4b9c04f4ee5bd..278b106251c697 100644 --- a/test/lib/ansible_test/_internal/cli/completers.py +++ b/test/lib/ansible_test/_internal/cli/completers.py @@ -13,14 +13,19 @@ ) -def complete_target(completer, prefix, parsed_args, **_): # type: (OptionCompletionFinder, str, argparse.Namespace, ...) -> t.List[str] +def complete_target(completer: OptionCompletionFinder, prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Perform completion for the targets configured for the command being parsed.""" matches = find_target_completion(parsed_args.targets_func, prefix, completer.list_mode) completer.disable_completion_mangling = completer.list_mode and len(matches) > 1 return matches -def complete_choices(choices, prefix, **_): # type: (t.List[str], str, ...) 
-> t.List[str] +def complete_choices(choices: t.List[str], prefix: str, **_) -> t.List[str]: """Perform completion using the provided choices.""" matches = [choice for choice in choices if choice.startswith(prefix)] return matches + + +def register_completer(action: argparse.Action, completer) -> None: + """Register the given completer with the specified action.""" + action.completer = completer # type: ignore[attr-defined] # intentionally using an attribute that does not exist diff --git a/test/lib/ansible_test/_internal/cli/environments.py b/test/lib/ansible_test/_internal/cli/environments.py index 3c0230ca1cd67a..1495f8efcd98bc 100644 --- a/test/lib/ansible_test/_internal/cli/environments.py +++ b/test/lib/ansible_test/_internal/cli/environments.py @@ -13,6 +13,10 @@ SUPPORTED_PYTHON_VERSIONS, ) +from ..util import ( + REMOTE_ARCHITECTURES, +) + from ..completion import ( docker_completion, network_completion, @@ -53,12 +57,17 @@ from .completers import ( complete_choices, + register_completer, ) from .converters import ( key_value_type, ) +from .epilog import ( + get_epilog, +) + from ..ci import ( get_ci_provider, ) @@ -98,6 +107,8 @@ def add_environments( if not get_ci_provider().supports_core_ci_auth(): sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.') + sections.append(get_epilog(completer)) + parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.epilog = '\n\n'.join(sections) @@ -169,40 +180,40 @@ def register_action_type(action_type): # type: (t.Type[CompositeAction]) -> t.T if controller_mode == ControllerMode.NO_DELEGATION: composite_parser.set_defaults(controller=None) else: - composite_parser.add_argument( + register_completer(composite_parser.add_argument( '--controller', metavar='OPT', action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction), help='configuration for the controller', - ).completer = completer.completer + ), completer.completer) if target_mode == TargetMode.NO_TARGETS: composite_parser.set_defaults(targets=[]) elif target_mode == TargetMode.SHELL: group = composite_parser.add_mutually_exclusive_group() - group.add_argument( + register_completer(group.add_argument( '--target-posix', metavar='OPT', action=register_action_type(PosixSshTargetAction), help='configuration for the target', - ).completer = completer.completer + ), completer.completer) suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - group.add_argument( + register_completer(group.add_argument( '--target-windows', metavar='OPT', action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction), help=suppress or 'configuration for the target', - ).completer = completer.completer + ), completer.completer) - group.add_argument( + register_completer(group.add_argument( '--target-network', metavar='OPT', action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction), help=suppress or 'configuration for the target', - ).completer = completer.completer + ), completer.completer) else: if target_mode.multiple_pythons: target_option = '--target-python' @@ -224,12 +235,12 @@ def register_action_type(action_type): # type: (t.Type[CompositeAction]) -> t.T target_action = target_actions[target_mode] - composite_parser.add_argument( + register_completer(composite_parser.add_argument( target_option, metavar='OPT', action=register_action_type(target_action), help=target_help, - 
).completer = completer.completer + ), completer.completer) return action_types @@ -240,9 +251,8 @@ def add_legacy_environment_options( target_mode, # type: TargetMode ): """Add legacy options for controlling the test environment.""" - # noinspection PyTypeChecker - environment = parser.add_argument_group( - title='environment arguments (mutually exclusive with "composite environment arguments" below)') # type: argparse.ArgumentParser + environment: argparse.ArgumentParser = parser.add_argument_group( # type: ignore[assignment] # real type private + title='environment arguments (mutually exclusive with "composite environment arguments" below)') add_environments_python(environment, target_mode) add_environments_host(environment, controller_mode, target_mode) @@ -253,6 +263,8 @@ def add_environments_python( target_mode, # type: TargetMode ): # type: (...) -> None """Add environment arguments to control the Python version(s) used.""" + python_versions: t.Tuple[str, ...] + if target_mode.has_python: python_versions = SUPPORTED_PYTHON_VERSIONS else: @@ -278,8 +290,7 @@ def add_environments_host( target_mode # type: TargetMode ): # type: (...) -> None """Add environment arguments for the given host and argument modes.""" - # noinspection PyTypeChecker - environments_exclusive_group = environments_parser.add_mutually_exclusive_group() # type: argparse.ArgumentParser + environments_exclusive_group: argparse.ArgumentParser = environments_parser.add_mutually_exclusive_group() # type: ignore[assignment] # real type private add_environment_local(environments_exclusive_group) add_environment_venv(environments_exclusive_group, environments_parser) @@ -299,28 +310,28 @@ def add_environment_network( environments_parser, # type: argparse.ArgumentParser ): # type: (...) -> None """Add environment arguments for running on a windows host.""" - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform', metavar='PLATFORM', action='append', help='network platform/version', - ).completer = complete_network_platform + ), complete_network_platform) - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform-collection', type=key_value_type, metavar='PLATFORM=COLLECTION', action='append', help='collection used to test platform', - ).completer = complete_network_platform_collection + ), complete_network_platform_collection) - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--platform-connection', type=key_value_type, metavar='PLATFORM=CONNECTION', action='append', help='connection used to test platform', - ).completer = complete_network_platform_connection + ), complete_network_platform_connection) environments_parser.add_argument( '--inventory', @@ -333,12 +344,12 @@ def add_environment_windows( environments_parser, # type: argparse.ArgumentParser ): # type: (...) 
-> None """Add environment arguments for running on a windows host.""" - environments_parser.add_argument( + register_completer(environments_parser.add_argument( '--windows', metavar='VERSION', action='append', help='windows version', - ).completer = complete_windows + ), complete_windows) environments_parser.add_argument( '--inventory', @@ -386,6 +397,8 @@ def add_global_docker( docker_network=None, docker_terminate=None, prime_containers=False, + dev_systemd_debug=False, + dev_probe_cgroups=None, ) return @@ -417,6 +430,24 @@ def add_global_docker( help='download containers without running tests', ) + # Docker support isn't related to ansible-core-ci. + # However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options. + suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS + + parser.add_argument( + '--dev-systemd-debug', + action='store_true', + help=suppress or 'enable systemd debugging in containers', + ) + + parser.add_argument( + '--dev-probe-cgroups', + metavar='DIR', + nargs='?', + const='', + help=suppress or 'probe container cgroups, with optional log dir', + ) + def add_environment_docker( exclusive_parser, # type: argparse.ArgumentParser @@ -429,13 +460,13 @@ def add_environment_docker( else: docker_images = sorted(filter_completion(docker_completion(), controller_only=True)) - exclusive_parser.add_argument( + register_completer(exclusive_parser.add_argument( '--docker', metavar='IMAGE', nargs='?', const='default', help='run from a docker container', - ).completer = functools.partial(complete_choices, docker_images) + ), functools.partial(complete_choices, docker_images)) environments_parser.add_argument( '--docker-privileged', @@ -474,12 +505,12 @@ def add_global_remote( suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - parser.add_argument( + register_completer(parser.add_argument( '--remote-stage', metavar='STAGE', default='prod', help=suppress or 'remote stage to use: prod, dev', - ).completer = complete_remote_stage + ), complete_remote_stage) parser.add_argument( '--remote-endpoint', @@ -512,11 +543,11 @@ def add_environment_remote( suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS - exclusive_parser.add_argument( + register_completer(exclusive_parser.add_argument( '--remote', metavar='NAME', help=suppress or 'run from a remote instance', - ).completer = functools.partial(complete_choices, remote_platforms) + ), functools.partial(complete_choices, remote_platforms)) environments_parser.add_argument( '--remote-provider', @@ -525,25 +556,32 @@ def add_environment_remote( help=suppress or 'remote provider to use: %(choices)s', ) + environments_parser.add_argument( + '--remote-arch', + metavar='ARCH', + choices=REMOTE_ARCHITECTURES, + help=suppress or 'remote arch to use: %(choices)s', + ) + -def complete_remote_stage(prefix, **_): # type: (str, ...) -> t.List[str] +def complete_remote_stage(prefix: str, **_) -> t.List[str]: """Return a list of supported stages matching the given prefix.""" return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)] -def complete_windows(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) 
-> t.List[str] +def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line.""" return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)] -def complete_network_platform(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line.""" images = sorted(filter_completion(network_completion())) return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)] -def complete_network_platform_collection(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line.""" left = prefix.split('=')[0] images = sorted(set(image.platform for image in filter_completion(network_completion()).values())) @@ -551,7 +589,7 @@ def complete_network_platform_collection(prefix, parsed_args, **_): # type: (st return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])] -def complete_network_platform_connection(prefix, parsed_args, **_): # type: (str, argparse.Namespace, ...) -> t.List[str] +def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]: """Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line.""" left = prefix.split('=')[0] images = sorted(set(image.platform for image in filter_completion(network_completion()).values())) diff --git a/test/lib/ansible_test/_internal/cli/epilog.py b/test/lib/ansible_test/_internal/cli/epilog.py new file mode 100644 index 00000000000000..3800ff1c0c0ba1 --- /dev/null +++ b/test/lib/ansible_test/_internal/cli/epilog.py @@ -0,0 +1,23 @@ +"""Argument parsing epilog generation.""" +from __future__ import annotations + +from .argparsing import ( + CompositeActionCompletionFinder, +) + +from ..data import ( + data_context, +) + + +def get_epilog(completer: CompositeActionCompletionFinder) -> str: + """Generate and return the epilog to use for help output.""" + if completer.enabled: + epilog = 'Tab completion available using the "argcomplete" python package.' + else: + epilog = 'Install the "argcomplete" python package to enable tab completion.' 
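# How the epilog built here survives help output: argparse's default
# HelpFormatter re-wraps description and epilog text into one paragraph,
# which would collapse the blank line added below. That is presumably why
# parse_args() (earlier in this diff) pairs get_epilog() with
# argparse.RawDescriptionHelpFormatter, which prints the epilog verbatim.
# A minimal sketch, with placeholder text standing in for the
# explain_working_directory() output:
import argparse

epilog = 'Install the "argcomplete" python package to enable tab completion.'
epilog += '\n\n' + 'placeholder for data_context().explain_working_directory() output'

parser = argparse.ArgumentParser(
    prog='ansible-test',
    epilog=epilog,
    formatter_class=argparse.RawDescriptionHelpFormatter,  # preserve the blank line verbatim
)
parser.print_help()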
+ + if data_context().content.unsupported: + epilog += '\n\n' + data_context().explain_working_directory() + + return epilog diff --git a/test/lib/ansible_test/_internal/cli/parsers/__init__.py b/test/lib/ansible_test/_internal/cli/parsers/__init__.py index 25bac9167bf7c2..e870d9f8cae9a4 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/__init__.py +++ b/test/lib/ansible_test/_internal/cli/parsers/__init__.py @@ -73,7 +73,7 @@ class DelegatedControllerParser(ControllerNamespaceParser, TypeParser): """Composite argument parser for the controller when delegation is supported.""" def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = dict( + parsers: t.Dict[str, Parser] = dict( origin=OriginParser(), docker=DockerParser(controller=True), ) @@ -99,7 +99,7 @@ class PosixTargetParser(TargetNamespaceParser, TypeParser): """Composite argument parser for a POSIX target.""" def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = dict( + parsers: t.Dict[str, Parser] = dict( controller=ControllerParser(), docker=DockerParser(controller=False), ) @@ -142,7 +142,7 @@ def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] def get_internal_parsers(self, targets): # type: (t.List[WindowsConfig]) -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = {} + parsers = {} # type: t.Dict[str, Parser] if self.allow_inventory and not targets: parsers.update( @@ -184,7 +184,7 @@ def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser] def get_internal_parsers(self, targets): # type: (t.List[NetworkConfig]) -> t.Dict[str, Parser] """Return a dictionary of type names and type parsers.""" - parsers = {} + parsers = {} # type: t.Dict[str, Parser] if self.allow_inventory and not targets: parsers.update( diff --git a/test/lib/ansible_test/_internal/cli/parsers/helpers.py b/test/lib/ansible_test/_internal/cli/parsers/helpers.py index 8dc7a65c5828f5..03f3cb79bc8fbb 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/helpers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/helpers.py @@ -27,7 +27,7 @@ def get_docker_pythons(name, controller, strict): # type: (str, bool, bool) -> available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS if not image_config: - return [] if strict else available_pythons + return [] if strict else list(available_pythons) supported_pythons = [python for python in image_config.supported_pythons if python in available_pythons] @@ -40,7 +40,7 @@ def get_remote_pythons(name, controller, strict): # type: (str, bool, bool) -> available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS if not platform_config: - return [] if strict else available_pythons + return [] if strict else list(available_pythons) supported_pythons = [python for python in platform_config.supported_pythons if python in available_pythons] @@ -54,6 +54,6 @@ def get_controller_pythons(controller_config, strict): # type: (HostConfig, boo elif isinstance(controller_config, PosixRemoteConfig): pythons = get_remote_pythons(controller_config.name, False, strict) else: - pythons = SUPPORTED_PYTHON_VERSIONS + pythons = list(SUPPORTED_PYTHON_VERSIONS) return pythons diff --git a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py index 
b22705f7314c43..820f9c4b1c21d9 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py @@ -10,15 +10,29 @@ SUPPORTED_PYTHON_VERSIONS, ) +from ...completion import ( + AuditMode, + CGroupVersion, +) + +from ...util import ( + REMOTE_ARCHITECTURES, +) + from ...host_configs import ( OriginConfig, ) +from ...become import ( + SUPPORTED_BECOME_METHODS, +) + from ..argparsing.parsers import ( AnyParser, BooleanParser, ChoicesParser, DocumentationState, + EnumValueChoicesParser, IntegerParser, KeyValueParser, Parser, @@ -95,6 +109,8 @@ def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] return dict( python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default), seccomp=ChoicesParser(SECCOMP_CHOICES), + cgroup=EnumValueChoicesParser(CGroupVersion), + audit=EnumValueChoicesParser(AuditMode), privileged=BooleanParser(), memory=IntegerParser(), ) @@ -108,6 +124,8 @@ def document(self, state): # type: (DocumentationState) -> t.Optional[str] state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([ f' python={python_parser.document(state)}', f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}', + f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}', + f' audit={EnumValueChoicesParser(AuditMode).document(state)}', f' privileged={BooleanParser().document(state)}', f' memory={IntegerParser().document(state)} # bytes', ]) @@ -125,7 +143,9 @@ def __init__(self, name, controller): def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] """Return a dictionary of key names and value parsers.""" return dict( + become=ChoicesParser(list(SUPPORTED_BECOME_METHODS)), provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default), ) @@ -136,7 +156,9 @@ def document(self, state): # type: (DocumentationState) -> t.Optional[str] section_name = 'remote options' state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([ + f' become={ChoicesParser(list(SUPPORTED_BECOME_METHODS)).document(state)}', f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', f' python={python_parser.document(state)}', ]) @@ -149,6 +171,7 @@ def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] """Return a dictionary of key names and value parsers.""" return dict( provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), ) def document(self, state): # type: (DocumentationState) -> t.Optional[str] @@ -157,6 +180,7 @@ def document(self, state): # type: (DocumentationState) -> t.Optional[str] state.sections[f'target {section_name} (comma separated):'] = '\n'.join([ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', ]) return f'{{{section_name}}}' @@ -168,6 +192,7 @@ def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] """Return a dictionary of key names and value parsers.""" return dict( provider=ChoicesParser(REMOTE_PROVIDERS), + arch=ChoicesParser(REMOTE_ARCHITECTURES), collection=AnyParser(), connection=AnyParser(), ) @@ -178,7 +203,8 @@ def document(self, state): # type: (DocumentationState) -> 
t.Optional[str] state.sections[f'target {section_name} (comma separated):'] = '\n'.join([ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}', - ' collection={collecton}', + f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}', + ' collection={collection}', ' connection={connection}', ]) diff --git a/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py index 1aae88216f44c2..d09ab7cc2118dc 100644 --- a/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py +++ b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py @@ -5,6 +5,7 @@ from ...host_configs import ( NativePythonConfig, + PythonConfig, VirtualPythonConfig, ) @@ -18,6 +19,7 @@ Parser, ParserError, ParserState, + ParserBoundary, ) @@ -58,7 +60,7 @@ class PythonParser(Parser): The origin host and unknown environments assume all relevant Python versions are available. """ def __init__(self, - versions, # type: t.List[str] + versions, # type: t.Sequence[str] *, allow_default, # type: bool allow_venv, # type: bool @@ -85,9 +87,13 @@ def __init__(self, def parse(self, state): # type: (ParserState) -> t.Any """Parse the input from the given state and return the result.""" + boundary: ParserBoundary + with state.delimit('@/', required=False) as boundary: version = ChoicesParser(self.first_choices).parse(state) + python: PythonConfig + if version == 'venv': with state.delimit('@/', required=False) as boundary: version = ChoicesParser(self.venv_choices).parse(state) @@ -156,7 +162,7 @@ def parse(self, state): # type: (ParserState) -> t.Any setattr(namespace, 'user', user) - with state.delimit(':', required=False) as colon: + with state.delimit(':', required=False) as colon: # type: ParserBoundary host = AnyParser(no_match_message=f'Expected {{host}} from: {self.EXPECTED_FORMAT}').parse(state) setattr(namespace, 'host', host) diff --git a/test/lib/ansible_test/_internal/commands/coverage/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/__init__.py index 50bc82632f8599..88128c46eadf70 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/__init__.py @@ -95,7 +95,16 @@ def run_coverage(args, host_state, output_file, command, cmd): # type: (Coverag cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd - intercept_python(args, host_state.controller_profile.python, cmd, env) + stdout, stderr = intercept_python(args, host_state.controller_profile.python, cmd, env, capture=True) + + stdout = (stdout or '').strip() + stderr = (stderr or '').strip() + + if stdout: + display.info(stdout) + + if stderr: + display.warning(stderr) def get_all_coverage_files(): # type: () -> t.List[str] @@ -152,7 +161,7 @@ def enumerate_python_arcs( modules, # type: t.Dict[str, str] collection_search_re, # type: t.Optional[t.Pattern] collection_sub_re, # type: t.Optional[t.Pattern] -): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]] +): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]], None, None] """Enumerate Python code coverage arcs in the given file.""" if os.path.getsize(path) == 0: display.warning('Empty coverage file: %s' % path, verbosity=2) @@ -193,7 +202,7 @@ def enumerate_powershell_lines( path, # type: str collection_search_re, # type: t.Optional[t.Pattern] collection_sub_re, # type: t.Optional[t.Pattern] -): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]]] +): # type: (...) 
-> t.Generator[t.Tuple[str, t.Dict[int, int]], None, None] """Enumerate PowerShell code coverage lines in the given file.""" if os.path.getsize(path) == 0: display.warning('Empty coverage file: %s' % path, verbosity=2) @@ -298,7 +307,7 @@ class PathChecker: def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None self.args = args self.collection_search_re = collection_search_re - self.invalid_paths = [] + self.invalid_paths = [] # type: t.List[str] self.invalid_path_chars = 0 def check_path(self, path): # type: (str) -> bool diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py index db169fd7a03b57..16521bef4f179f 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py @@ -14,4 +14,4 @@ def __init__(self, args): # type: (t.Any) -> None # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used - self.info_stderr = True + self.display_stderr = True diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py index a39d12c82549ff..267969886eaa89 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py @@ -18,27 +18,22 @@ CoverageAnalyzeConfig, ) -if t.TYPE_CHECKING: - TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int]) - NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]] - IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]] - Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]] - Lines = t.Dict[str, t.Dict[int, t.Set[int]]] - TargetIndexes = t.Dict[str, int] - TargetSetIndexes = t.Dict[t.FrozenSet[int], int] +TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int]) +NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]] +IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]] +Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]] +Lines = t.Dict[str, t.Dict[int, t.Set[int]]] +TargetIndexes = t.Dict[str, int] +TargetSetIndexes = t.Dict[t.FrozenSet[int], int] class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig): """Configuration for the `coverage analyze targets` command.""" - def __init__(self, args): # type: (t.Any) -> None - super().__init__(args) - - self.info_stderr = True def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any] """Condense target indexes, arcs and lines into a compact report.""" - set_indexes = {} + set_indexes = {} # type: TargetSetIndexes arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items()) line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items()) @@ -95,6 +90,11 @@ def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t. 
), verbosity=1) +def format_line(value): # type: (int) -> str + """Format line as a string.""" + return str(value) # putting this in a function keeps both pylint and mypy happy + + def format_arc(value): # type: (t.Tuple[int, int]) -> str """Format an arc tuple as a string.""" return '%d:%d' % value diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py index d68edc02b26dfe..1ea9d59eb4cf64 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py @@ -18,13 +18,12 @@ write_report, ) -if t.TYPE_CHECKING: - from . import ( - Arcs, - IndexedPoints, - Lines, - TargetIndexes, - ) +from . import ( + Arcs, + IndexedPoints, + Lines, + TargetIndexes, +) class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig): diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py index 6ca6e6d33afffc..d92834246067f5 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py @@ -19,6 +19,7 @@ CoverageAnalyzeTargetsConfig, expand_indexes, format_arc, + format_line, read_report, ) @@ -43,7 +44,7 @@ def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTarg report = dict( arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc), - lines=expand_indexes(covered_path_lines, covered_targets, str), + lines=expand_indexes(covered_path_lines, covered_targets, format_line), ) if not args.explain: diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py index e5d2f500038517..e5e0dff774d8d2 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py @@ -21,11 +21,10 @@ write_report, ) -if t.TYPE_CHECKING: - from . import ( - NamedPoints, - TargetIndexes, - ) +from . import ( + NamedPoints, + TargetIndexes, +) class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig): diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py index 3f9bca74db999b..54b2516fc79709 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py @@ -43,12 +43,11 @@ write_report, ) -if t.TYPE_CHECKING: - from . import ( - Arcs, - Lines, - TargetIndexes, - ) +from . 
import ( + Arcs, + Lines, + TargetIndexes, +) class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig): @@ -68,7 +67,7 @@ def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTa raise Delegate(host_state) root = data_context().content.root - target_indexes = {} + target_indexes = {} # type: TargetIndexes arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, host_state, args.input_dir, target_indexes).items()) lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items()) report = make_report(target_indexes, arcs, lines) @@ -139,7 +138,7 @@ def analyze_powershell_coverage( def prune_invalid_filenames( args, # type: CoverageAnalyzeTargetsGenerateConfig results, # type: t.Dict[str, t.Any] - collection_search_re=None, # type: t.Optional[str] + collection_search_re=None, # type: t.Optional[t.Pattern] ): # type: (...) -> None """Remove invalid filenames from the given result set.""" path_checker = PathChecker(args, collection_search_re) diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py index 9b6d696dbedfca..f3cdfe5b957953 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py +++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py @@ -24,11 +24,10 @@ write_report, ) -if t.TYPE_CHECKING: - from . import ( - TargetIndexes, - IndexedPoints, - ) +from . import ( + TargetIndexes, + IndexedPoints, +) class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig): @@ -53,7 +52,7 @@ def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTar from_targets, from_path_arcs, from_path_lines = read_report(args.from_file) to_targets, to_path_arcs, to_path_lines = read_report(args.to_file) - target_indexes = {} + target_indexes = {} # type: TargetIndexes if args.only_gaps: arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists) @@ -74,7 +73,7 @@ def find_gaps( only_exists, # type: bool ): # type: (...) -> IndexedPoints """Find gaps in coverage between the from and to data sets.""" - target_data = {} + target_data = {} # type: IndexedPoints for from_path, from_points in from_data.items(): if only_exists and not os.path.isfile(to_bytes(from_path)): @@ -100,7 +99,7 @@ def find_missing( only_exists, # type: bool ): # type: (...) 
-> IndexedPoints """Find coverage in from_data not present in to_data (arcs or lines).""" - target_data = {} + target_data = {} # type: IndexedPoints for from_path, from_points in from_data.items(): if only_exists and not os.path.isfile(to_bytes(from_path)): diff --git a/test/lib/ansible_test/_internal/commands/coverage/combine.py b/test/lib/ansible_test/_internal/commands/coverage/combine.py index b240df461eed63..c93be27090824b 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/combine.py +++ b/test/lib/ansible_test/_internal/commands/coverage/combine.py @@ -18,11 +18,11 @@ ANSIBLE_TEST_TOOLS_ROOT, display, ApplicationError, + raw_command, ) from ...util_common import ( ResultType, - run_command, write_json_file, write_json_test_results, ) @@ -189,7 +189,7 @@ def _default_stub_value(source_paths): cmd = ['pwsh', os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'coverage_stub.ps1')] cmd.extend(source_paths) - stubs = json.loads(run_command(args, cmd, capture=True, always=True)[0]) + stubs = json.loads(raw_command(cmd, capture=True)[0]) return dict((d['Path'], dict((line, 0) for line in d['Lines'])) for d in stubs) @@ -315,7 +315,6 @@ def get_coverage_group(args, coverage_file): # type: (CoverageCombineConfig, st """Return the name of the coverage group for the specified coverage file, or None if no group was found.""" parts = os.path.basename(coverage_file).split('=', 4) - # noinspection PyTypeChecker if len(parts) != 5 or not parts[4].startswith('coverage.'): return None diff --git a/test/lib/ansible_test/_internal/commands/coverage/xml.py b/test/lib/ansible_test/_internal/commands/coverage/xml.py index ed9603c28fa532..c498d1c2b25f51 100644 --- a/test/lib/ansible_test/_internal/commands/coverage/xml.py +++ b/test/lib/ansible_test/_internal/commands/coverage/xml.py @@ -76,7 +76,7 @@ def _generate_powershell_xml(coverage_file): # type: (str) -> Element content_root = data_context().content.root is_ansible = data_context().content.is_ansible - packages = {} + packages = {} # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]] for path, results in coverage_info.items(): filename = os.path.splitext(os.path.basename(path))[0] @@ -131,7 +131,7 @@ def _generate_powershell_xml(coverage_file): # type: (str) -> Element return elem_coverage -def _add_cobertura_package(packages, package_name, package_data): # type: (SubElement, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int] +def _add_cobertura_package(packages, package_name, package_data): # type: (Element, str, t.Dict[str, t.Dict[str, int]]) -> t.Tuple[int, int] """Add a package element to the given packages element.""" elem_package = SubElement(packages, 'package') elem_classes = SubElement(elem_package, 'classes') diff --git a/test/lib/ansible_test/_internal/commands/env/__init__.py b/test/lib/ansible_test/_internal/commands/env/__init__.py index c625209c845ac5..41a1d520904108 100644 --- a/test/lib/ansible_test/_internal/commands/env/__init__.py +++ b/test/lib/ansible_test/_internal/commands/env/__init__.py @@ -17,9 +17,9 @@ from ...util import ( display, - SubprocessError, get_ansible_version, get_available_python_versions, + ApplicationError, ) from ...util_common import ( @@ -30,8 +30,8 @@ from ...docker_util import ( get_docker_command, - docker_info, - docker_version + get_docker_info, + get_docker_container_id, ) from ...constants import ( @@ -70,11 +70,14 @@ def show_dump_env(args): # type: (EnvConfig) -> None if not args.show and not args.dump: return + container_id = get_docker_container_id() + data = dict( ansible=dict( 
version=get_ansible_version(), ), docker=get_docker_details(args), + container_id=container_id, environ=os.environ.copy(), location=dict( pwd=os.environ.get('PWD', None), @@ -166,7 +169,7 @@ def show_dict(data, verbose, root_verbosity=0, path=None): # type: (t.Dict[str, display.info(indent + '%s: %s' % (key, value), verbosity=verbosity) -def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, str] +def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, t.Any] """Return details about docker.""" docker = get_docker_command() @@ -178,14 +181,12 @@ def get_docker_details(args): # type: (EnvConfig) -> t.Dict[str, str] executable = docker.executable try: - info = docker_info(args) - except SubprocessError as ex: - display.warning('Failed to collect docker info:\n%s' % ex) - - try: - version = docker_version(args) - except SubprocessError as ex: - display.warning('Failed to collect docker version:\n%s' % ex) + docker_info = get_docker_info(args) + except ApplicationError as ex: + display.warning(str(ex)) + else: + info = docker_info.info + version = docker_info.version docker_details = dict( executable=executable, diff --git a/test/lib/ansible_test/_internal/commands/integration/__init__.py b/test/lib/ansible_test/_internal/commands/integration/__init__.py index a9a49aa14b0294..2ae1e39c9dba9c 100644 --- a/test/lib/ansible_test/_internal/commands/integration/__init__.py +++ b/test/lib/ansible_test/_internal/commands/integration/__init__.py @@ -98,6 +98,7 @@ from ...host_profiles import ( ControllerProfile, + ControllerHostProfile, HostProfile, PosixProfile, SshTargetHostProfile, @@ -134,7 +135,7 @@ def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTa """Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend.""" targets_dict = dict((target.name, target) for target in integration_targets) target_dependencies = analyze_integration_target_dependencies(integration_targets) - dependency_map = {} + dependency_map = {} # type: t.Dict[str, t.Set[IntegrationTarget]] invalid_targets = set() @@ -159,7 +160,7 @@ def generate_dependency_map(integration_targets): # type: (t.List[IntegrationTa def get_files_needed(target_dependencies): # type: (t.List[IntegrationTarget]) -> t.List[str] """Return a list of files needed by the given list of target dependencies.""" - files_needed = [] + files_needed = [] # type: t.List[str] for target_dependency in target_dependencies: files_needed += target_dependency.needs_file @@ -241,7 +242,7 @@ def integration_test_environment( args, # type: IntegrationConfig target, # type: IntegrationTarget inventory_path_src, # type: str -): # type: (...) -> t.ContextManager[IntegrationEnvironment] +): # type: (...) 
-> t.Iterator[IntegrationEnvironment] """Context manager that prepares the integration test environment and cleans it up.""" ansible_config_src = args.get_ansible_config() ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command) @@ -324,7 +325,7 @@ def integration_test_environment( display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2) if not args.explain: - shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) + shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) # type: ignore[arg-type] # incorrect type stub omits bytes path support for file_src, file_dst in file_copies: display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2) @@ -344,7 +345,7 @@ def integration_test_config_file( args, # type: IntegrationConfig env_config, # type: CloudEnvironmentConfig integration_dir, # type: str -): # type: (...) -> t.ContextManager[t.Optional[str]] +): # type: (...) -> t.Iterator[t.Optional[str]] """Context manager that provides a config file for integration tests, if needed.""" if not env_config: yield None @@ -361,7 +362,7 @@ def integration_test_config_file( config_file = json.dumps(config_vars, indent=4, sort_keys=True) - with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: + with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: # type: str filename = os.path.relpath(path, integration_dir) display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3) @@ -398,8 +399,8 @@ def create_inventory( def command_integration_filtered( args, # type: IntegrationConfig host_state, # type: HostState - targets, # type: t.Tuple[IntegrationTarget] - all_targets, # type: t.Tuple[IntegrationTarget] + targets, # type: t.Tuple[IntegrationTarget, ...] + all_targets, # type: t.Tuple[IntegrationTarget, ...] inventory_path, # type: str pre_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]] post_target=None, # type: t.Optional[t.Callable[[IntegrationTarget], None]] @@ -413,7 +414,7 @@ def command_integration_filtered( all_targets_dict = dict((target.name, target) for target in all_targets) setup_errors = [] - setup_targets_executed = set() + setup_targets_executed = set() # type: t.Set[str] for target in all_targets: for setup_target in target.setup_once + target.setup_always: @@ -530,6 +531,10 @@ def command_integration_filtered( if not tries: raise + if target.retry_never: + display.warning(f'Skipping retry of test target "{target.name}" since it has been excluded from retries.') + raise + display.warning('Retrying test target "%s" with maximum verbosity.' 
% target.name) display.verbosity = args.verbosity = 6 @@ -538,7 +543,7 @@ def command_integration_filtered( failed.append(target) if args.continue_on_error: - display.error(ex) + display.error(str(ex)) continue display.notice('To resume at this test target, use the option: --start-at %s' % target.name) @@ -597,7 +602,7 @@ def command_integration_script( module_defaults=env_config.module_defaults, ), indent=4, sort_keys=True), verbosity=3) - with integration_test_environment(args, target, inventory_path) as test_env: + with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment cmd = ['./%s' % os.path.basename(target.script_path)] if args.verbosity: @@ -614,12 +619,12 @@ def command_integration_script( if env_config and env_config.env_vars: env.update(env_config.env_vars) - with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: + with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: # type: t.Optional[str] if config_path: cmd += ['-e', '@%s' % config_path] env.update(coverage_manager.get_environment(target.name, target.aliases)) - cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd) + cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False) def command_integration_role( @@ -673,7 +678,7 @@ def command_integration_role( module_defaults=env_config.module_defaults, ), indent=4, sort_keys=True), verbosity=3) - with integration_test_environment(args, target, inventory_path) as test_env: + with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment if os.path.exists(test_env.vars_file): vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir)) @@ -738,14 +743,14 @@ def command_integration_role( env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir env.update(coverage_manager.get_environment(target.name, target.aliases)) - cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd) + cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False) def run_setup_targets( args, # type: IntegrationConfig host_state, # type: HostState test_dir, # type: str - target_names, # type: t.List[str] + target_names, # type: t.Sequence[str] targets_dict, # type: t.Dict[str, IntegrationTarget] targets_executed, # type: t.Set[str] inventory_path, # type: str @@ -956,13 +961,10 @@ def integration_config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> return host_state, internal_targets -def requirements(args, host_state): # type: (IntegrationConfig, HostState) -> None - """Install requirements.""" - target_profile = host_state.target_profiles[0] - - configure_pypi_proxy(args, host_state.controller_profile) # integration, windows-integration, network-integration - - if isinstance(target_profile, PosixProfile) and not isinstance(target_profile, ControllerProfile): - configure_pypi_proxy(args, target_profile) # integration - - install_requirements(args, host_state.controller_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration +def requirements(host_profile: HostProfile) -> None: + """Install requirements after bootstrapping and delegation.""" + if isinstance(host_profile, ControllerHostProfile) and host_profile.controller: + configure_pypi_proxy(host_profile.args, host_profile) # integration, windows-integration, 
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
index 70f8afafe6c04a..5afde048b31ebd 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
@@ -59,8 +59,8 @@ def get_cloud_plugins():  # type: () -> t.Tuple[t.Dict[str, t.Type[CloudProvider
     """Import cloud plugins and load them into the plugin dictionaries."""
     import_plugins('commands/integration/cloud')

-    providers = {}
-    environments = {}
+    providers = {}  # type: t.Dict[str, t.Type[CloudProvider]]
+    environments = {}  # type: t.Dict[str, t.Type[CloudEnvironment]]

     load_plugins(CloudProvider, providers)
     load_plugins(CloudEnvironment, environments)
@@ -134,7 +134,7 @@ def cloud_filter(args, targets):  # type: (IntegrationConfig, t.Tuple[Integratio
     if args.metadata.cloud_config is not None:
         return []  # cloud filter already performed prior to delegation

-    exclude = []
+    exclude = []  # type: t.List[str]

     for provider in get_cloud_providers(args, targets):
         provider.filter(targets, exclude)
@@ -206,7 +206,7 @@ def config_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
     @property
     def setup_executed(self):  # type: () -> bool
         """True if setup has been executed, otherwise False."""
-        return self._get_cloud_config(self._SETUP_EXECUTED, False)
+        return t.cast(bool, self._get_cloud_config(self._SETUP_EXECUTED, False))

     @setup_executed.setter
     def setup_executed(self, value):  # type: (bool) -> None
@@ -216,7 +216,7 @@ def setup_executed(self, value):  # type: (bool) -> None
     @property
     def config_path(self):  # type: () -> str
         """Path to the configuration file."""
-        return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH))
+        return os.path.join(data_context().content.root, str(self._get_cloud_config(self._CONFIG_PATH)))

     @config_path.setter
     def config_path(self, value):  # type: (str) -> None
@@ -226,7 +226,7 @@ def config_path(self, value):  # type: (str) -> None
     @property
     def resource_prefix(self):  # type: () -> str
         """Resource prefix."""
-        return self._get_cloud_config(self._RESOURCE_PREFIX)
+        return str(self._get_cloud_config(self._RESOURCE_PREFIX))

     @resource_prefix.setter
     def resource_prefix(self, value):  # type: (str) -> None
@@ -236,7 +236,7 @@ def resource_prefix(self, value):  # type: (str) -> None
     @property
     def managed(self):  # type: () -> bool
         """True if resources are managed by ansible-test, otherwise False."""
-        return self._get_cloud_config(self._MANAGED)
+        return t.cast(bool, self._get_cloud_config(self._MANAGED))

     @managed.setter
     def managed(self, value):  # type: (bool) -> None
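The `t.cast(...)` and `str(...)` wrappers above exist because `_get_cloud_config` returns a union of primitive types. A self-contained sketch of the same pattern, using illustrative names rather than the actual ansible-test API:

```python
import typing as t

_config: t.Dict[str, t.Union[str, int, bool]] = dict(resource_prefix='ansible-test-42', managed=True)


def get_cloud_config(key: str) -> t.Union[str, int, bool]:
    """Stand-in for CloudBase._get_cloud_config."""
    return _config[key]


def resource_prefix() -> str:
    # str() coerces the union at runtime, satisfying the declared return type.
    return str(get_cloud_config('resource_prefix'))


def managed() -> bool:
    # t.cast() narrows the type for the checker without any runtime conversion.
    return t.cast(bool, get_cloud_config('managed'))


assert resource_prefix() == 'ansible-test-42' and managed() is True
```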
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
index 94e60667c3c74a..48aef72751444f 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
@@ -21,6 +21,7 @@

 from ....core_ci import (
     AnsibleCoreCI,
+    CloudResource,
 )

 from ....host_configs import (
@@ -91,7 +92,7 @@ def _setup_dynamic(self):  # type: () -> None

     def _create_ansible_core_ci(self):  # type: () -> AnsibleCoreCI
         """Return an AWS instance of AnsibleCoreCI."""
-        return AnsibleCoreCI(self.args, 'aws', 'aws', 'aws', persist=False)
+        return AnsibleCoreCI(self.args, CloudResource(platform='aws'))


 class AwsCloudEnvironment(CloudEnvironment):
@@ -104,9 +105,8 @@ def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
         ansible_vars = dict(
             resource_prefix=self.resource_prefix,
             tiny_prefix=uuid.uuid4().hex[0:12]
-        )
+        )  # type: t.Dict[str, t.Any]

-        # noinspection PyTypeChecker
         ansible_vars.update(dict(parser.items('default')))

         display.sensitive.add(ansible_vars.get('aws_secret_key'))
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
index 002fa581dbaffe..a6c7156aa33739 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
@@ -29,6 +29,7 @@

 from ....core_ci import (
     AnsibleCoreCI,
+    CloudResource,
 )

 from . import (
@@ -45,7 +46,7 @@ class AzureCloudProvider(CloudProvider):
     def __init__(self, args):  # type: (IntegrationConfig) -> None
         super().__init__(args)

-        self.aci = None
+        self.aci = None  # type: t.Optional[AnsibleCoreCI]

         self.uses_config = True

@@ -133,7 +134,7 @@ def _setup_dynamic(self):  # type: () -> None

     def _create_ansible_core_ci(self):  # type: () -> AnsibleCoreCI
         """Return an Azure instance of AnsibleCoreCI."""
-        return AnsibleCoreCI(self.args, 'azure', 'azure', 'azure', persist=False)
+        return AnsibleCoreCI(self.args, CloudResource(platform='azure'))


 class AzureCloudEnvironment(CloudEnvironment):
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
index f20a7d887ef0d4..8ffcabfb32ee05 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
@@ -106,7 +106,7 @@ def _setup_dynamic(self):  # type: () -> None

         # apply work-around for OverlayFS issue
         # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
-        docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
+        docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True)

         if self.args.explain:
             values = dict(
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py
index b4ca48f75f4dc5..86a38fef24b8cc 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py
@@ -85,8 +85,8 @@ class ForemanEnvironment(CloudEnvironment):
     def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
         """Return environment configuration for use in the test environment after delegation."""
         env_vars = dict(
-            FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'),
-            FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'),
+            FOREMAN_HOST=str(self._get_cloud_config('FOREMAN_HOST')),
+            FOREMAN_PORT=str(self._get_cloud_config('FOREMAN_PORT')),
         )

         return CloudEnvironmentConfig(
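The `_create_ansible_core_ci` call sites above replace three repeated positional platform strings with a single `CloudResource` descriptor. A runnable sketch of the shape of that refactor; the stand-in dataclass is illustrative, and only the `platform` keyword is taken from the diff:

```python
import dataclasses


@dataclasses.dataclass(frozen=True)
class CloudResource:
    """Illustrative stand-in for the descriptor passed to AnsibleCoreCI."""
    platform: str


def create_core_ci(resource: CloudResource) -> str:
    # The platform is now stated once, instead of as three positional strings.
    return f'provision {resource.platform} instance'


assert create_core_ci(CloudResource(platform='aws')) == 'provision aws instance'
```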
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
index de58cbf5bca69f..302a2919153f94 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
@@ -145,8 +145,8 @@ class GalaxyEnvironment(CloudEnvironment):
     """Galaxy environment plugin. Updates integration test environment after delegation."""
     def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
         """Return environment configuration for use in the test environment after delegation."""
-        pulp_user = self._get_cloud_config('PULP_USER')
-        pulp_password = self._get_cloud_config('PULP_PASSWORD')
+        pulp_user = str(self._get_cloud_config('PULP_USER'))
+        pulp_password = str(self._get_cloud_config('PULP_PASSWORD'))
         pulp_host = self._get_cloud_config('PULP_HOST')
         galaxy_port = self._get_cloud_config('GALAXY_PORT')
         pulp_port = self._get_cloud_config('PULP_PORT')
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
index 28b07e72305aa7..6912aff36dd1b8 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
@@ -18,6 +18,7 @@

 from ....core_ci import (
     AnsibleCoreCI,
+    CloudResource,
 )

 from . import (
@@ -78,7 +79,7 @@ def _setup_dynamic(self):  # type: () -> None

     def _create_ansible_core_ci(self):  # type: () -> AnsibleCoreCI
         """Return a Hetzner instance of AnsibleCoreCI."""
-        return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', 'hetzner', persist=False)
+        return AnsibleCoreCI(self.args, CloudResource(platform='hetzner'))


 class HcloudCloudEnvironment(CloudEnvironment):
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
index 2d8217e99cdf1f..00c62b76e642df 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
@@ -87,6 +87,6 @@ def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
         return CloudEnvironmentConfig(
             env_vars=dict(
                 HTTPTESTER='1',  # backwards compatibility for tests intended to work with or without HTTP Tester
-                KRB5_PASSWORD=self._get_cloud_config(KRB5_PASSWORD_ENV),
+                KRB5_PASSWORD=str(self._get_cloud_config(KRB5_PASSWORD_ENV)),
             )
         )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py
index fb69b9b2124523..2093b461c89377 100644
--- a/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py
@@ -107,14 +107,14 @@ def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
             ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
         except KeyError:  # govcsim
             env_vars = dict(
-                VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'),
+                VCENTER_HOSTNAME=str(self._get_cloud_config('vcenter_hostname')),
                 VCENTER_USERNAME='user',
                 VCENTER_PASSWORD='pass',
             )

             ansible_vars = dict(
-                vcsim=self._get_cloud_config('vcenter_hostname'),
-                vcenter_hostname=self._get_cloud_config('vcenter_hostname'),
+                vcsim=str(self._get_cloud_config('vcenter_hostname')),
+                vcenter_hostname=str(self._get_cloud_config('vcenter_hostname')),
                 vcenter_username='user',
                 vcenter_password='pass',
             )
diff --git a/test/lib/ansible_test/_internal/commands/integration/coverage.py b/test/lib/ansible_test/_internal/commands/integration/coverage.py
index c36b440366fde0..dd885c30f9aacb 100644
--- a/test/lib/ansible_test/_internal/commands/integration/coverage.py
+++ b/test/lib/ansible_test/_internal/commands/integration/coverage.py
@@ -33,6 +33,7 @@
     get_type_map,
     remove_tree,
     sanitize_host_name,
+    verified_chmod,
 )

 from ...util_common import (
@@ -118,7 +119,7 @@ def get_environment(self, target_name, aliases):  # type: (str, t.Tuple[str, ...
     def run_playbook(self, playbook, variables):  # type: (str, t.Dict[str, str]) -> None
         """Run the specified playbook using the current inventory."""
         self.create_inventory()
-        run_playbook(self.args, self.inventory_path, playbook, variables)
+        run_playbook(self.args, self.inventory_path, playbook, capture=False, variables=variables)


 class PosixCoverageHandler(CoverageHandler[PosixConfig]):
@@ -166,9 +167,9 @@ def setup_controller(self):

         write_text_file(coverage_config_path, coverage_config, create_directories=True)

-        os.chmod(coverage_config_path, MODE_FILE)
+        verified_chmod(coverage_config_path, MODE_FILE)

         os.mkdir(coverage_output_path)
-        os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
+        verified_chmod(coverage_output_path, MODE_DIRECTORY_WRITE)

     def setup_target(self):
         """Perform setup for code coverage on the target."""
@@ -271,7 +272,7 @@ def __init__(self, args, host_state, inventory_path):  # type: (IntegrationConfi
     @property
     def is_active(self):  # type: () -> bool
         """True if the handler should be used, otherwise False."""
-        return self.profiles and not self.args.coverage_check
+        return bool(self.profiles) and not self.args.coverage_check

     def setup(self):  # type: () -> None
         """Perform setup for code coverage."""
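`verified_chmod` replaces the bare `os.chmod` calls above, presumably to confirm the requested mode actually took effect, since some container-host filesystems can silently ignore mode changes. A minimal sketch of such a wrapper, assuming the real helper in `...util` behaves similarly:

```python
import os
import stat


def verified_chmod(path: str, mode: int) -> None:
    """Change the mode of the given path and verify the change took effect."""
    os.chmod(path, mode)

    applied_mode = stat.S_IMODE(os.lstat(path).st_mode)

    if applied_mode != mode:
        raise RuntimeError(f'Mode {mode:o} requested for "{path}" but {applied_mode:o} found after chmod.')
```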
diff --git a/test/lib/ansible_test/_internal/commands/integration/filters.py b/test/lib/ansible_test/_internal/commands/integration/filters.py
index 0396ce9231a1bb..63c7c6b5b151e5 100644
--- a/test/lib/ansible_test/_internal/commands/integration/filters.py
+++ b/test/lib/ansible_test/_internal/commands/integration/filters.py
@@ -10,6 +10,7 @@

 from ...util import (
     cache,
+    detect_architecture,
     display,
     get_type_map,
 )
@@ -108,19 +109,19 @@ def filter_targets(self, targets, exclude):  # type: (t.List[IntegrationTarget],

         if not self.allow_destructive and not self.config.is_managed:
             override_destructive = set(target for target in self.include_targets if target.startswith('destructive/'))
-            override = [target.name for target in targets if override_destructive & set(target.skips)]
+            override = [target.name for target in targets if override_destructive & set(target.aliases)]

             self.skip('destructive', 'which require --allow-destructive or prefixing with "destructive/" to run on unmanaged hosts', targets, exclude, override)

         if not self.args.allow_disabled:
             override_disabled = set(target for target in self.args.include if target.startswith('disabled/'))
-            override = [target.name for target in targets if override_disabled & set(target.skips)]
+            override = [target.name for target in targets if override_disabled & set(target.aliases)]

             self.skip('disabled', 'which require --allow-disabled or prefixing with "disabled/"', targets, exclude, override)

         if not self.args.allow_unsupported:
             override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/'))
-            override = [target.name for target in targets if override_unsupported & set(target.skips)]
+            override = [target.name for target in targets if override_unsupported & set(target.aliases)]

             self.skip('unsupported', 'which require --allow-unsupported or prefixing with "unsupported/"', targets, exclude, override)
@@ -130,7 +131,7 @@ def filter_targets(self, targets, exclude):  # type: (t.List[IntegrationTarget],
             if self.args.allow_unstable_changed:
                 override_unstable |= set(self.args.metadata.change_description.focused_targets or [])

-            override = [target.name for target in targets if override_unstable & set(target.skips)]
+            override = [target.name for target in targets if override_unstable & set(target.aliases)]

             self.skip('unstable', 'which require --allow-unstable or prefixing with "unstable/"', targets, exclude, override)
@@ -223,6 +224,14 @@ class NetworkInventoryTargetFilter(TargetFilter[NetworkInventoryConfig]):

 class OriginTargetFilter(PosixTargetFilter[OriginConfig]):
     """Target filter for localhost."""
+    def filter_targets(self, targets, exclude):  # type: (t.List[IntegrationTarget], t.Set[str]) -> None
+        """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+        super().filter_targets(targets, exclude)
+
+        arch = detect_architecture(self.config.python.path)
+
+        if arch:
+            self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude)


 @cache
@@ -247,10 +256,7 @@ def get_target_filter(args, configs, controller):  # type: (IntegrationConfig, t

 def get_remote_skip_aliases(config):  # type: (RemoteConfig) -> t.Dict[str, str]
     """Return a dictionary of skip aliases and the reason why they apply."""
-    if isinstance(config, PosixRemoteConfig):
-        return get_platform_skip_aliases(config.platform, config.version, config.arch)
-
-    return get_platform_skip_aliases(config.platform, config.version, None)
+    return get_platform_skip_aliases(config.platform, config.version, config.arch)


 def get_platform_skip_aliases(platform, version, arch):  # type: (str, str, t.Optional[str]) -> t.Dict[str, str]
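With `detect_architecture` wired into `OriginTargetFilter`, targets can now be excluded per architecture through `skip/{arch}` aliases, mirroring the existing platform skips. An illustrative aliases entry, assuming `detect_architecture` reports machine names such as `x86_64` or `aarch64`:

```
# aliases file for a target that cannot run on 64-bit ARM (hypothetical)
shippable/posix/group2
skip/aarch64
```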
diff --git a/test/lib/ansible_test/_internal/commands/sanity/__init__.py b/test/lib/ansible_test/_internal/commands/sanity/__init__.py
index 8c1340f2fc9fed..c5008193de6c7e 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/__init__.py
@@ -142,7 +142,7 @@ def command_sanity(args):  # type: (SanityConfig) -> None
     if not targets.include:
         raise AllTargetsSkipped()

-    tests = sanity_get_tests()
+    tests = list(sanity_get_tests())

     if args.test:
         disabled = []
@@ -157,6 +157,8 @@ def command_sanity(args):  # type: (SanityConfig) -> None
     targets_use_pypi = any(isinstance(test, SanityMultipleVersion) and test.needs_pypi for test in tests) and not args.list_tests
     host_state = prepare_profiles(args, targets_use_pypi=targets_use_pypi)  # sanity

+    get_content_config(args)  # make sure content config has been parsed prior to delegation
+
     if args.delegate:
         raise Delegate(host_state=host_state, require=changes, exclude=args.exclude)
@@ -170,9 +172,11 @@ def command_sanity(args):  # type: (SanityConfig) -> None
     total = 0
     failed = []

+    result: t.Optional[TestResult]
+
     for test in tests:
         if args.list_tests:
-            display.info(test.name)
+            print(test.name)  # display goes to stderr, this should be on stdout
             continue

         for version in SUPPORTED_PYTHON_VERSIONS:
@@ -201,19 +205,19 @@ def command_sanity(args):  # type: (SanityConfig) -> None
             else:
                 raise Exception('Unsupported test type: %s' % type(test))

-            all_targets = targets.targets
+            all_targets = list(targets.targets)

             if test.all_targets:
-                usable_targets = targets.targets
+                usable_targets = list(targets.targets)
             elif test.no_targets:
-                usable_targets = tuple()
+                usable_targets = []
             else:
-                usable_targets = targets.include
+                usable_targets = list(targets.include)

             all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
             usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)

-            usable_targets = sorted(test.filter_targets_by_version(list(usable_targets), version))
+            usable_targets = sorted(test.filter_targets_by_version(args, list(usable_targets), version))
             usable_targets = settings.filter_skipped_targets(usable_targets)
             sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
@@ -355,12 +359,12 @@ def __init__(self, args):  # type: (SanityConfig) -> None
                 for python_version in test.supported_python_versions:
                     test_name = '%s-%s' % (test.name, python_version)

-                    paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(test_targets, python_version))
+                    paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, python_version))
                     tests_by_name[test_name] = test
             else:
                 unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))

-                paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(test_targets, ''))
+                paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, ''))
                 tests_by_name[test.name] = test

         for line_no, line in enumerate(lines, start=1):
@@ -503,12 +507,15 @@ def __init__(self, args):  # type: (SanityConfig) -> None
     def load(args):  # type: (SanityConfig) -> SanityIgnoreParser
         """Return the current SanityIgnore instance, initializing it if needed."""
         try:
-            return SanityIgnoreParser.instance
+            return SanityIgnoreParser.instance  # type: ignore[attr-defined]
         except AttributeError:
             pass

-        SanityIgnoreParser.instance = SanityIgnoreParser(args)
-        return SanityIgnoreParser.instance
+        instance = SanityIgnoreParser(args)
+
+        SanityIgnoreParser.instance = instance  # type: ignore[attr-defined]
+
+        return instance


 class SanityIgnoreProcessor:
@@ -571,7 +578,7 @@ def filter_messages(self, messages):  # type: (t.List[SanityMessage]) -> t.List[

     def get_errors(self, paths):  # type: (t.List[str]) -> t.List[SanityMessage]
         """Return error messages related to issues with the file."""
-        messages = []
+        messages = []  # type: t.List[SanityMessage]

         # unused errors

@@ -621,7 +628,7 @@ def __init__(
             self,
             test,  # type: str
             python_version=None,  # type: t.Optional[str]
-            messages=None,  # type: t.Optional[t.List[SanityMessage]]
+            messages=None,  # type: t.Optional[t.Sequence[SanityMessage]]
             summary=None,  # type: t.Optional[str]
     ):  # type: (...) -> None
         super().__init__(COMMAND, test, python_version, messages, summary)
@@ -633,7 +640,7 @@ class SanityMessage(TestMessage):

 class SanityTargets:
     """Sanity test target information."""
-    def __init__(self, targets, include):  # type: (t.Tuple[TestTarget], t.Tuple[TestTarget]) -> None
+    def __init__(self, targets, include):  # type: (t.Tuple[TestTarget, ...], t.Tuple[TestTarget, ...]) -> None
         self.targets = targets
         self.include = include
@@ -671,11 +678,13 @@ def filter_and_inject_targets(test, targets):  # type: (SanityTest, t.Iterable[T
     def get_targets():  # type: () -> t.Tuple[TestTarget, ...]
         """Return a tuple of sanity test targets. Uses a cached version when available."""
         try:
-            return SanityTargets.get_targets.targets
+            return SanityTargets.get_targets.targets  # type: ignore[attr-defined]
         except AttributeError:
-            SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets()))
+            targets = tuple(sorted(walk_sanity_targets()))
+
+            SanityTargets.get_targets.targets = targets  # type: ignore[attr-defined]

-        return SanityTargets.get_targets.targets
+        return targets


 class SanityTest(metaclass=abc.ABCMeta):
@@ -695,7 +704,7 @@ def __init__(self, name=None):  # type: (t.Optional[str]) -> None
         # Because these errors can be unpredictable they behave differently than normal error codes:
         #  * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
         #  * They cannot be ignored. This is done to maintain the integrity of the ignore system.
-        self.optional_error_codes = set()
+        self.optional_error_codes = set()  # type: t.Set[str]

     @property
     def error_code(self):  # type: () -> t.Optional[str]
@@ -749,7 +758,7 @@ def filter_targets(self, targets):  # type: (t.List[TestTarget]) -> t.List[TestT

         raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)

-    def filter_targets_by_version(self, targets, python_version):  # type: (t.List[TestTarget], str) -> t.List[TestTarget]
+    def filter_targets_by_version(self, args, targets, python_version):  # type: (SanityConfig, t.List[TestTarget], str) -> t.List[TestTarget]
         """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
         del python_version  # python_version is not used here, but derived classes may make use of it

@@ -757,7 +766,7 @@ def filter_targets_by_version(self, targets, python_version):  # type: (t.List[T

         if self.py2_compat:
             # This sanity test is a Python 2.x compatibility test.
-            content_config = get_content_config()
+            content_config = get_content_config(args)

             if content_config.py2_support:
                 # This collection supports Python 2.x.
@@ -938,6 +947,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
         cmd = [python.path, self.path]

         env = ansible_environment(args, color=False)
+        env.update(PYTHONUTF8='1')  # force all code-smell sanity tests to run with Python UTF-8 Mode enabled

         pattern = None
         data = None
@@ -952,7 +962,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
             elif self.output == 'path-message':
                 pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
             else:
-                pattern = ApplicationError('Unsupported output type: %s' % self.output)
+                raise ApplicationError('Unsupported output type: %s' % self.output)

         if not self.no_targets:
             data = '\n'.join(paths)
@@ -1041,15 +1051,15 @@ def supported_python_versions(self):  # type: () -> t.Optional[t.Tuple[str, ...]
         """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
         return SUPPORTED_PYTHON_VERSIONS

-    def filter_targets_by_version(self, targets, python_version):  # type: (t.List[TestTarget], str) -> t.List[TestTarget]
+    def filter_targets_by_version(self, args, targets, python_version):  # type: (SanityConfig, t.List[TestTarget], str) -> t.List[TestTarget]
         """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
         if not python_version:
             raise Exception('python_version is required to filter multi-version tests')

-        targets = super().filter_targets_by_version(targets, python_version)
+        targets = super().filter_targets_by_version(args, targets, python_version)

         if python_version in REMOTE_ONLY_PYTHON_VERSIONS:
-            content_config = get_content_config()
+            content_config = get_content_config(args)

             if python_version not in content_config.modules.python_versions:
                 # when a remote-only python version is not supported there are no paths to test
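Several hunks above annotate the same caching idiom: storing a computed value as an attribute on the function object itself, which mypy only accepts with `# type: ignore[attr-defined]`. A distilled, runnable version of the pattern:

```python
import typing as t


def get_targets() -> t.Tuple[str, ...]:
    """Return sorted targets, computing them only once per process."""
    try:
        return get_targets.targets  # type: ignore[attr-defined]
    except AttributeError:
        targets = tuple(sorted(('beta', 'alpha')))

        get_targets.targets = targets  # type: ignore[attr-defined]

    return targets


assert get_targets() is get_targets()  # the second call returns the cached attribute
```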
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions.""" return SUPPORTED_PYTHON_VERSIONS - def filter_targets_by_version(self, targets, python_version): # type: (t.List[TestTarget], str) -> t.List[TestTarget] + def filter_targets_by_version(self, args, targets, python_version): # type: (SanityConfig, t.List[TestTarget], str) -> t.List[TestTarget] """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version.""" if not python_version: raise Exception('python_version is required to filter multi-version tests') - targets = super().filter_targets_by_version(targets, python_version) + targets = super().filter_targets_by_version(args, targets, python_version) if python_version in REMOTE_ONLY_PYTHON_VERSIONS: - content_config = get_content_config() + content_config = get_content_config(args) if python_version not in content_config.modules.python_versions: # when a remote-only python version is not supported there are no paths to test diff --git a/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py index 82d9f751335f09..f542a1718686ec 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py +++ b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py @@ -11,6 +11,7 @@ SanityFailure, SanitySuccess, SanityTargets, + SanityMessage, ) from ...test import ( @@ -77,8 +78,8 @@ def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, Py paths = [target.path for target in targets.include] - doc_targets = collections.defaultdict(list) - target_paths = collections.defaultdict(dict) + doc_targets = collections.defaultdict(list) # type: t.Dict[str, t.List[str]] + target_paths = collections.defaultdict(dict) # type: t.Dict[str, t.Dict[str, str]] remap_types = dict( modules='module', @@ -97,7 +98,7 @@ def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, Py target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path env = ansible_environment(args, color=False) - error_messages = [] + error_messages = [] # type: t.List[SanityMessage] for doc_type in sorted(doc_targets): for format_option in [None, '--json']: diff --git a/test/lib/ansible_test/_internal/commands/sanity/ignores.py b/test/lib/ansible_test/_internal/commands/sanity/ignores.py index 9a39955ac558ab..867243adfee0e7 100644 --- a/test/lib/ansible_test/_internal/commands/sanity/ignores.py +++ b/test/lib/ansible_test/_internal/commands/sanity/ignores.py @@ -2,6 +2,7 @@ from __future__ import annotations import os +import typing as t from . 
diff --git a/test/lib/ansible_test/_internal/commands/sanity/import.py b/test/lib/ansible_test/_internal/commands/sanity/import.py
index aa0239d522aaf8..28619e6f006e2e 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/import.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/import.py
@@ -111,7 +111,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
             try:
                 install_requirements(args, python, virtualenv=True, controller=False)  # sanity (import)
             except PipUnavailableError as ex:
-                display.warning(ex)
+                display.warning(str(ex))

         temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
diff --git a/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py
index 3ff989598850f6..bc96b684f83b08 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py
@@ -1,6 +1,7 @@
 """Sanity test to check integration test aliases."""
 from __future__ import annotations

+import dataclasses
 import json
 import textwrap
 import os
@@ -128,7 +129,7 @@ def load_ci_config(self, python):  # type: (PythonConfig) -> t.Dict[str, t.Any]
     def ci_test_groups(self):  # type: () -> t.Dict[str, t.List[int]]
         """Return a dictionary of CI test names and their group(s)."""
         if not self._ci_test_groups:
-            test_groups = {}
+            test_groups = {}  # type: t.Dict[str, t.Set[int]]

             for stage in self._ci_config['stages']:
                 for job in stage['jobs']:
@@ -210,7 +211,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
                 path=self.CI_YML,
             )])

-        results = dict(
+        results = Results(
             comments=[],
             labels={},
         )
@@ -218,7 +219,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
         self.load_ci_config(python)
         self.check_changes(args, results)

-        write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
+        write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results.__dict__)

         messages = []
@@ -318,6 +319,9 @@ def check_ci_group(
         messages = []

         for path in unassigned_paths:
+            if path == 'test/integration/targets/ansible-test-container':
+                continue  # special test target which uses group 6 -- nothing else should be in that group
+
             messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))

         for path in conflicting_paths:
@@ -325,8 +329,8 @@ def check_ci_group(

         return messages

-    def check_changes(self, args, results):  # type: (SanityConfig, t.Dict[str, t.Any]) -> None
-        """Check changes and store results in the provided results dictionary."""
+    def check_changes(self, args, results):  # type: (SanityConfig, Results) -> None
+        """Check changes and store results in the provided result dictionary."""
         integration_targets = list(walk_integration_targets())
         module_targets = list(walk_module_targets())
@@ -370,8 +374,8 @@ def check_changes(self, args, results):  # type: (SanityConfig, t.Dict[str, t.An
             unsupported_tests=bool(unsupported_targets),
         )

-        results['comments'] += comments
-        results['labels'].update(labels)
+        results.comments += comments
+        results.labels.update(labels)

     def format_comment(self, template, targets):  # type: (str, t.List[str]) -> t.Optional[str]
         """Format and return a comment based on the given template and targets, or None if there are no targets."""
@@ -388,3 +392,10 @@ def format_comment(self, template, targets):  # type: (str, t.List[str]) -> t.Op
         message = textwrap.dedent(template).strip().format(**data)

         return message
+
+
+@dataclasses.dataclass
+class Results:
+    """Check results."""
+    comments: t.List[str]
+    labels: t.Dict[str, bool]
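`write_json_test_results` still receives a plain dictionary, so the new `Results` dataclass is serialized via `results.__dict__`. For a flat dataclass like this one that is equivalent to `dataclasses.asdict`, which additionally handles nested dataclasses; a quick runnable check:

```python
import dataclasses
import json
import typing as t


@dataclasses.dataclass
class Results:
    """Check results."""
    comments: t.List[str]
    labels: t.Dict[str, bool]


results = Results(comments=['example comment'], labels=dict(needs_ci=True))

# For a flat dataclass the two forms produce identical JSON.
assert json.dumps(results.__dict__) == json.dumps(dataclasses.asdict(results))
```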
diff --git a/test/lib/ansible_test/_internal/commands/sanity/pep8.py b/test/lib/ansible_test/_internal/commands/sanity/pep8.py
index 71241c913ff539..2610e730d91e06 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/pep8.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/pep8.py
@@ -92,7 +92,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
         else:
             results = []

-        results = [SanityMessage(
+        messages = [SanityMessage(
             message=r['message'],
             path=r['path'],
             line=int(r['line']),
@@ -101,7 +101,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
             code=r['code'],
         ) for r in results]

-        errors = settings.process_errors(results, paths)
+        errors = settings.process_errors(messages, paths)

         if errors:
             return SanityFailure(self.name, messages=errors)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/pylint.py b/test/lib/ansible_test/_internal/commands/sanity/pylint.py
index a4322f0308dfac..eafc5d5e0b0dac 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/pylint.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/pylint.py
@@ -142,7 +142,7 @@ def context_filter(path_to_filter):  # type: (str) -> bool

         if data_context().content.collection:
             try:
-                collection_detail = get_collection_detail(args, python)
+                collection_detail = get_collection_detail(python)

                 if not collection_detail.version:
                     display.warning('Skipping pylint collection version checks since no collection version was found.')
diff --git a/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py
index 0eccc01f9ca7c6..49a025c99dd0fc 100644
--- a/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py
+++ b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py
@@ -91,7 +91,7 @@ def test(self, args, targets, python):  # type: (SanityConfig, SanityTargets, Py
             cmd.extend(['--collection', data_context().content.collection.directory])

             try:
-                collection_detail = get_collection_detail(args, python)
+                collection_detail = get_collection_detail(python)

                 if collection_detail.version:
                     cmd.extend(['--collection-version', collection_detail.version])
diff --git a/test/lib/ansible_test/_internal/commands/shell/__init__.py b/test/lib/ansible_test/_internal/commands/shell/__init__.py
index 7364819e0c1b41..099734df595e9c 100644
--- a/test/lib/ansible_test/_internal/commands/shell/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/shell/__init__.py
@@ -2,11 +2,15 @@
 from __future__ import annotations

 import os
+import sys
 import typing as t

 from ...util import (
     ApplicationError,
+    OutputStream,
     display,
+    SubprocessError,
+    HostConnectionError,
 )

 from ...config import (
@@ -18,6 +22,7 @@
 )

 from ...connections import (
+    Connection,
     LocalConnection,
     SshConnection,
 )
@@ -37,12 +42,20 @@
     OriginConfig,
 )

+from ...inventory import (
+    create_controller_inventory,
+    create_posix_inventory,
+)
+

 def command_shell(args):  # type: (ShellConfig) -> None
     """Entry point for the `shell` command."""
     if args.raw and isinstance(args.targets[0], ControllerConfig):
         raise ApplicationError('The --raw option has no effect on the controller.')

+    if not args.export and not args.cmd and not sys.stdin.isatty():
+        raise ApplicationError('Standard input must be a TTY to launch a shell.')
+
     host_state = prepare_profiles(args, skip_setup=args.raw)  # shell

     if args.delegate:
@@ -55,13 +68,31 @@ def command_shell(args):  # type: (ShellConfig) -> None

     if isinstance(target_profile, ControllerProfile):
         # run the shell locally unless a target was requested
-        con = LocalConnection(args)
+        con = LocalConnection(args)  # type: Connection
+
+        if args.export:
+            display.info('Configuring controller inventory.', verbosity=1)
+            create_controller_inventory(args, args.export, host_state.controller_profile)
     else:
         # a target was requested, connect to it over SSH
         con = target_profile.get_controller_target_connections()[0]

+        if args.export:
+            display.info('Configuring target inventory.', verbosity=1)
+            create_posix_inventory(args, args.export, host_state.target_profiles, True)
+
+    if args.export:
+        return
+
+    if args.cmd:
+        # Running a command is assumed to be non-interactive. Only a shell (no command) is interactive.
+        # If we want to support interactive commands in the future, we'll need an `--interactive` command line option.
+        # Command stderr output is allowed to mix with our own output, which is all sent to stderr.
+        con.run(args.cmd, capture=False, interactive=False, output_stream=OutputStream.ORIGINAL)
+        return
+
     if isinstance(con, SshConnection) and args.raw:
-        cmd = []
+        cmd = []  # type: t.List[str]
     elif isinstance(target_profile, PosixProfile):
         cmd = []
@@ -86,4 +117,19 @@ def command_shell(args):  # type: (ShellConfig) -> None
     else:
         cmd = []

-    con.run(cmd)
+    try:
+        con.run(cmd, capture=False, interactive=True)
+    except SubprocessError as ex:
+        if isinstance(con, SshConnection) and ex.status == 255:
+            # 255 indicates SSH itself failed, rather than a command run on the remote host.
+            # In this case, report a host connection error so additional troubleshooting output is provided.
+            if not args.delegate and not args.host_path:
+                def callback() -> None:
+                    """Callback to run during error display."""
+                    target_profile.on_target_failure()  # when the controller is not delegated, report failures immediately
+            else:
+                callback = None
+
+            raise HostConnectionError(f'SSH shell connection failed for host {target_profile.config}: {ex}', callback) from ex
+
+        raise
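The early TTY check added to `command_shell` fails fast instead of letting an interactive shell hang on input that will never arrive. The guard itself is plain stdlib:

```python
import sys

# An interactive shell only makes sense when stdin is a terminal.
if not sys.stdin.isatty():
    raise SystemExit('Standard input must be a TTY to launch a shell.')
```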
diff --git a/test/lib/ansible_test/_internal/commands/units/__init__.py b/test/lib/ansible_test/_internal/commands/units/__init__.py
index 995f7159370bee..1ad0e2f6c43a45 100644
--- a/test/lib/ansible_test/_internal/commands/units/__init__.py
+++ b/test/lib/ansible_test/_internal/commands/units/__init__.py
@@ -103,7 +103,7 @@ def command_units(args):  # type: (UnitsConfig) -> None

     paths = [target.path for target in include]

-    content_config = get_content_config()
+    content_config = get_content_config(args)
     supported_remote_python_versions = content_config.modules.python_versions

     if content_config.modules.controller_only:
@@ -295,7 +295,7 @@ def command_units(args):  # type: (UnitsConfig) -> None
             display.info('Unit test %s with Python %s' % (test_context, python.version))

             try:
-                cover_python(args, python, cmd, test_context, env)
+                cover_python(args, python, cmd, test_context, env, capture=False)
             except SubprocessError as ex:
                 # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
                 if ex.status != 5:
@@ -311,9 +311,9 @@ def get_units_ansible_python_path(args, test_context):  # type: (UnitsConfig, st
         return get_ansible_python_path(args)

     try:
-        cache = get_units_ansible_python_path.cache
+        cache = get_units_ansible_python_path.cache  # type: ignore[attr-defined]
     except AttributeError:
-        cache = get_units_ansible_python_path.cache = {}
+        cache = get_units_ansible_python_path.cache = {}  # type: ignore[attr-defined]

     python_path = cache.get(test_context)
diff --git a/test/lib/ansible_test/_internal/compat/packaging.py b/test/lib/ansible_test/_internal/compat/packaging.py
index a38e1abc2b0f4f..44c2bdbbd649c0 100644
--- a/test/lib/ansible_test/_internal/compat/packaging.py
+++ b/test/lib/ansible_test/_internal/compat/packaging.py
@@ -1,14 +1,16 @@
 """Packaging compatibility."""
 from __future__ import annotations

+import typing as t
+
 try:
     from packaging import (
         specifiers,
         version,
     )

-    SpecifierSet = specifiers.SpecifierSet
-    Version = version.Version
+    SpecifierSet = specifiers.SpecifierSet  # type: t.Optional[t.Type[specifiers.SpecifierSet]]
+    Version = version.Version  # type: t.Optional[t.Type[version.Version]]
     PACKAGING_IMPORT_ERROR = None
 except ImportError as ex:
     SpecifierSet = None  # pylint: disable=invalid-name
diff --git a/test/lib/ansible_test/_internal/compat/yaml.py b/test/lib/ansible_test/_internal/compat/yaml.py
index daa5ef0ed49854..e4dbb651b1b771 100644
--- a/test/lib/ansible_test/_internal/compat/yaml.py
+++ b/test/lib/ansible_test/_internal/compat/yaml.py
@@ -1,6 +1,8 @@
 """PyYAML compatibility."""
 from __future__ import annotations

+import typing as t
+
 from functools import (
     partial,
 )
@@ -13,7 +15,7 @@
     YAML_IMPORT_ERROR = ex
 else:
     try:
-        _SafeLoader = _yaml.CSafeLoader
+        _SafeLoader = _yaml.CSafeLoader  # type: t.Union[t.Type[_yaml.CSafeLoader], t.Type[_yaml.SafeLoader]]
     except AttributeError:
         _SafeLoader = _yaml.SafeLoader
diff --git a/test/lib/ansible_test/_internal/completion.py b/test/lib/ansible_test/_internal/completion.py
index 86674cb2ff27e6..afa437a4055c24 100644
--- a/test/lib/ansible_test/_internal/completion.py
+++ b/test/lib/ansible_test/_internal/completion.py
@@ -3,6 +3,7 @@

 import abc
 import dataclasses
+import enum
 import os
 import typing as t

@@ -21,6 +22,30 @@
     data_context,
 )

+from .become import (
+    SUPPORTED_BECOME_METHODS,
+)
+
+
+class CGroupVersion(enum.Enum):
+    """The control group version(s) required by a container."""
+    NONE = 'none'
+    V1_ONLY = 'v1-only'
+    V2_ONLY = 'v2-only'
+    V1_V2 = 'v1-v2'
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}.{self.name}'
+
+
+class AuditMode(enum.Enum):
+    """The audit requirements of a container."""
+    NONE = 'none'
+    REQUIRED = 'required'
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}.{self.name}'
+

 @dataclasses.dataclass(frozen=True)
 class CompletionConfig(metaclass=abc.ABCMeta):
@@ -79,6 +104,7 @@ def get_python_path(self, version):  # type: (str) -> str
 class RemoteCompletionConfig(CompletionConfig):
     """Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
     provider: t.Optional[str] = None
+    arch: t.Optional[str] = None

     @property
     def platform(self):
@@ -99,6 +125,9 @@ def __post_init__(self):
         if not self.provider:
             raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')

+        if not self.arch:
+            raise Exception(f'Remote completion entry "{self.name}" must provide an "arch" setting.')
+

 @dataclasses.dataclass(frozen=True)
 class InventoryCompletionConfig(CompletionConfig):
@@ -132,6 +161,8 @@ class DockerCompletionConfig(PythonCompletionConfig):
     """Configuration for Docker containers."""
     image: str = ''
     seccomp: str = 'default'
+    cgroup: str = CGroupVersion.V1_V2.value
+    audit: str = AuditMode.REQUIRED.value  # most containers need this, so the default is required, leaving it to be opt-out for containers which don't need it
     placeholder: bool = False

     @property
@@ -139,6 +170,22 @@ def is_default(self):
         """True if the completion entry is only used for defaults, otherwise False."""
         return False

+    @property
+    def audit_enum(self) -> AuditMode:
+        """The audit requirements for the container. Raises an exception if the value is invalid."""
+        try:
+            return AuditMode(self.audit)
+        except ValueError:
+            raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None
+
+    @property
+    def cgroup_enum(self) -> CGroupVersion:
+        """The control group version(s) required by the container. Raises an exception if the value is invalid."""
+        try:
+            return CGroupVersion(self.cgroup)
+        except ValueError:
+            raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.cgroup}" for the "cgroup" setting.') from None
+
     def __post_init__(self):
         if not self.image:
             raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
@@ -146,20 +193,36 @@ def __post_init__(self):
         if not self.supported_pythons and not self.placeholder:
             raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')

+        # verify properties can be correctly parsed to enums
+        assert self.audit_enum
+        assert self.cgroup_enum
+

 @dataclasses.dataclass(frozen=True)
 class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
     """Configuration for remote network platforms."""
     collection: str = ''
     connection: str = ''
+    placeholder: bool = False
+
+    def __post_init__(self):
+        if not self.placeholder:
+            super().__post_init__()


 @dataclasses.dataclass(frozen=True)
 class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig):
     """Configuration for remote POSIX platforms."""
+    become: t.Optional[str] = None
     placeholder: bool = False

     def __post_init__(self):
+        if not self.placeholder:
+            super().__post_init__()
+
+        if self.become and self.become not in SUPPORTED_BECOME_METHODS:
+            raise Exception(f'POSIX remote completion entry "{self.name}" setting "become" must be omitted or one of: {", ".join(SUPPORTED_BECOME_METHODS)}')
+
         if not self.supported_pythons:
             if self.version and not self.placeholder:
                 raise Exception(f'POSIX remote completion entry "{self.name}" must provide a "python" setting.')
@@ -211,9 +274,9 @@ def filter_completion(
         controller_only=False,  # type: bool
         include_defaults=False,  # type: bool
 ):  # type: (...) -> t.Dict[str, TCompletionConfig]
-    """Return a the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
+    """Return the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
     if controller_only:
-        completion = {name: config for name, config in completion.items() if config.controller_supported}
+        completion = {name: config for name, config in completion.items() if isinstance(config, PosixCompletionConfig) and config.controller_supported}

     if not include_defaults:
         completion = {name: config for name, config in completion.items() if not config.is_default}
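The new `cgroup` and `audit` settings are parsed from docker completion entries for custom containers. An illustrative entry, assuming the usual `name key=value` completion file format; the container name, image, and values here are hypothetical:

```
mycontainer image=quay.io/example/test-container:1.0 python=3.10 cgroup=v1-only audit=none
```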
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
index e5c213f772130d..4061dd8ae56e9b 100644
--- a/test/lib/ansible_test/_internal/config.py
+++ b/test/lib/ansible_test/_internal/config.py
@@ -1,6 +1,7 @@
 """Configuration classes."""
 from __future__ import annotations

+import dataclasses
 import enum
 import os
 import sys
@@ -10,6 +11,7 @@
     display,
     verify_sys_executable,
     version_to_str,
+    type_guard,
 )

 from .util_common import (
@@ -47,27 +49,20 @@ def __str__(self):
         return self.name.lower()


-class ParsedRemote:
-    """A parsed version of a "remote" string."""
-    def __init__(self, arch, platform, version):  # type: (t.Optional[str], str, str) -> None
-        self.arch = arch
-        self.platform = platform
-        self.version = version
+@dataclasses.dataclass(frozen=True)
+class ModulesConfig:
+    """Configuration for modules."""
+    python_requires: str
+    python_versions: tuple[str, ...]
+    controller_only: bool

-    @staticmethod
-    def parse(value):  # type: (str) -> t.Optional['ParsedRemote']
-        """Return a ParsedRemote from the given value or None if the syntax is invalid."""
-        parts = value.split('/')
-
-        if len(parts) == 2:
-            arch = None
-            platform, version = parts
-        elif len(parts) == 3:
-            arch, platform, version = parts
-        else:
-            return None
-
-        return ParsedRemote(arch, platform, version)
+
+@dataclasses.dataclass(frozen=True)
+class ContentConfig:
+    """Configuration for all content."""
+    modules: ModulesConfig
+    python_versions: tuple[str, ...]
+    py2_support: bool


 class EnvironmentConfig(CommonConfig):
@@ -81,6 +76,10 @@ def __init__(self, args, command):  # type: (t.Any, str) -> None
         self.pypi_proxy = args.pypi_proxy  # type: bool
         self.pypi_endpoint = args.pypi_endpoint  # type: t.Optional[str]

+        # Populated by content_config.get_content_config on the origin.
+        # Serialized and passed to delegated instances to avoid parsing a second time.
+        self.content_config = None  # type: t.Optional[ContentConfig]
+
         # Set by check_controller_python once HostState has been created by prepare_profiles.
         # This is here for convenience, to avoid needing to pass HostState to some functions which already have access to EnvironmentConfig.
         self.controller_python = None  # type: t.Optional[PythonConfig]
@@ -96,7 +95,7 @@ def __init__(self, args, command):  # type: (t.Any, str) -> None
             not isinstance(self.controller, OriginConfig)
             or isinstance(self.controller.python, VirtualPythonConfig)
             or self.controller.python.version != version_to_str(sys.version_info[:2])
-            or verify_sys_executable(self.controller.python.path)
+            or bool(verify_sys_executable(self.controller.python.path))
         )

         self.docker_network = args.docker_network  # type: t.Optional[str]
@@ -112,6 +111,9 @@ def __init__(self, args, command):  # type: (t.Any, str) -> None

         self.delegate_args = []  # type: t.List[str]

+        self.dev_systemd_debug: bool = args.dev_systemd_debug
+        self.dev_probe_cgroups: t.Optional[str] = args.dev_probe_cgroups
+
         def host_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
             """Add the host files to the payload file list."""
             config = self
@@ -119,9 +121,11 @@ def host_callback(files):  # type: (t.List[t.Tuple[str, str]]) -> None
             if config.host_path:
                 settings_path = os.path.join(config.host_path, 'settings.dat')
                 state_path = os.path.join(config.host_path, 'state.dat')
+                config_path = os.path.join(config.host_path, 'config.dat')

                 files.append((os.path.abspath(settings_path), settings_path))
                 files.append((os.path.abspath(state_path), state_path))
+                files.append((os.path.abspath(config_path), config_path))

         data_context().register_payload_callback(host_callback)
@@ -161,16 +165,14 @@ def only_target(self, target_type):  # type: (t.Type[THostConfig]) -> THostConfi
     def only_targets(self, target_type):  # type: (t.Type[THostConfig]) -> t.List[THostConfig]
         """
         Return a list of target host configurations.
-        Requires that there are one or more targets, all of the specified type.
+        Requires that there are one or more targets, all the specified type.
""" if not self.targets: raise Exception('There must be one or more targets.') - for target in self.targets: - if not isinstance(target, target_type): - raise Exception(f'Target is {type(target_type)} instead of {target_type}.') + assert type_guard(self.targets, target_type) - return self.targets + return t.cast(t.List[THostConfig], self.targets) @property def target_type(self): # type: () -> t.Type[HostConfig] @@ -218,7 +220,7 @@ def __init__(self, args, command): # type: (t.Any, str) -> None self.failure_ok = getattr(args, 'failure_ok', False) # type: bool self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata() - self.metadata_path = None + self.metadata_path = None # type: t.Optional[str] if self.coverage_check: self.coverage = True @@ -238,7 +240,12 @@ class ShellConfig(EnvironmentConfig): def __init__(self, args): # type: (t.Any) -> None super().__init__(args, 'shell') + self.cmd = args.cmd # type: t.List[str] self.raw = args.raw # type: bool + self.check_layout = self.delegate # allow shell to be used without a valid layout as long as no delegation is required + self.interactive = sys.stdin.isatty() and not args.cmd # delegation should only be interactive when stdin is a TTY and no command was given + self.export = args.export # type: t.Optional[str] + self.display_stderr = True class SanityConfig(TestConfig): @@ -254,7 +261,7 @@ def __init__(self, args): # type: (t.Any) -> None self.keep_git = args.keep_git # type: bool self.prime_venvs = args.prime_venvs # type: bool - self.info_stderr = self.lint + self.display_stderr = self.lint or self.list_tests if self.keep_git: def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None @@ -293,7 +300,7 @@ def __init__(self, args, command): # type: (t.Any, str) -> None if self.list_targets: self.explain = True - self.info_stderr = True + self.display_stderr = True def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" diff --git a/test/lib/ansible_test/_internal/connections.py b/test/lib/ansible_test/_internal/connections.py index ddf4e8df38c628..f63308e3f80875 100644 --- a/test/lib/ansible_test/_internal/connections.py +++ b/test/lib/ansible_test/_internal/connections.py @@ -3,7 +3,6 @@ import abc import shlex -import sys import tempfile import typing as t @@ -17,6 +16,7 @@ from .util import ( Display, + OutputStream, SubprocessError, retry, ) @@ -34,6 +34,7 @@ from .ssh import ( SshConnectionDetail, + ssh_options_to_list, ) from .become import ( @@ -46,10 +47,12 @@ class Connection(metaclass=abc.ABCMeta): @abc.abstractmethod def run(self, command, # type: t.List[str] - capture=False, # type: bool + capture, # type: bool + interactive=False, # type: bool data=None, # type: t.Optional[str] stdin=None, # type: t.Optional[t.IO[bytes]] stdout=None, # type: t.Optional[t.IO[bytes]] + output_stream=None, # type: t.Optional[OutputStream] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return the result.""" @@ -58,11 +61,9 @@ def extract_archive(self, src, # type: t.IO[bytes] ): """Extract the given archive file stream in the specified directory.""" - # This will not work on AIX. - # However, AIX isn't supported as a controller, which is where this would be needed. 
         tar_cmd = ['tar', 'oxzf', '-', '-C', chdir]

-        retry(lambda: self.run(tar_cmd, stdin=src))
+        retry(lambda: self.run(tar_cmd, stdin=src, capture=True))

     def create_archive(self,
                        chdir,  # type: str
@@ -75,18 +76,16 @@ def create_archive(self,
         gzip_cmd = ['gzip']

         if exclude:
-            # This will not work on AIX.
-            # However, AIX isn't supported as a controller, which is where this would be needed.
             tar_cmd += ['--exclude', exclude]

         tar_cmd.append(name)

-        # Using gzip to compress the archive allows this to work on all POSIX systems we support, including AIX.
+        # Using gzip to compress the archive allows this to work on all POSIX systems we support.
         commands = [tar_cmd, gzip_cmd]

         sh_cmd = ['sh', '-c', ' | '.join(' '.join(shlex.quote(cmd) for cmd in command) for command in commands)]

-        retry(lambda: self.run(sh_cmd, stdout=dst))
+        retry(lambda: self.run(sh_cmd, stdout=dst, capture=True))


 class LocalConnection(Connection):
@@ -96,10 +95,12 @@ def __init__(self, args):  # type: (EnvironmentConfig) -> None

     def run(self,
             command,  # type: t.List[str]
-            capture=False,  # type: bool
+            capture,  # type: bool
+            interactive=False,  # type: bool
             data=None,  # type: t.Optional[str]
             stdin=None,  # type: t.Optional[t.IO[bytes]]
             stdout=None,  # type: t.Optional[t.IO[bytes]]
+            output_stream=None,  # type: t.Optional[OutputStream]
     ):  # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
         """Run the specified command and return the result."""
         return run_command(
@@ -109,6 +110,8 @@ def run(self,
             data=data,
             stdin=stdin,
             stdout=stdout,
+            interactive=interactive,
+            output_stream=output_stream,
         )


@@ -121,7 +124,7 @@ def __init__(self, args, settings, become=None):  # type: (EnvironmentConfig, Ss

         self.options = ['-i', settings.identity_file]

-        ssh_options = dict(
+        ssh_options: dict[str, t.Union[int, str]] = dict(
             BatchMode='yes',
             StrictHostKeyChecking='no',
             UserKnownHostsFile='/dev/null',
@@ -129,15 +132,18 @@ def __init__(self, args, settings, become=None):  # type: (EnvironmentConfig, Ss
             ServerAliveCountMax=4,
         )

-        for ssh_option in sorted(ssh_options):
-            self.options.extend(['-o', f'{ssh_option}={ssh_options[ssh_option]}'])
+        ssh_options.update(settings.options)
+
+        self.options.extend(ssh_options_to_list(ssh_options))

     def run(self,
             command,  # type: t.List[str]
-            capture=False,  # type: bool
+            capture,  # type: bool
+            interactive=False,  # type: bool
             data=None,  # type: t.Optional[str]
             stdin=None,  # type: t.Optional[t.IO[bytes]]
             stdout=None,  # type: t.Optional[t.IO[bytes]]
+            output_stream=None,  # type: t.Optional[OutputStream]
     ):  # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
         """Run the specified command and return the result."""
         options = list(self.options)
@@ -147,7 +153,7 @@ def run(self,

         options.append('-q')

-        if not data and not stdin and not stdout and sys.stdin.isatty():
+        if interactive:
             options.append('-tt')

         with tempfile.NamedTemporaryFile(prefix='ansible-test-ssh-debug-', suffix='.log') as ssh_logfile:
@@ -170,6 +176,8 @@ def error_callback(ex):  # type: (SubprocessError) -> None
                 data=data,
                 stdin=stdin,
                 stdout=stdout,
+                interactive=interactive,
+                output_stream=output_stream,
                 error_callback=error_callback,
             )

@@ -212,10 +220,12 @@ def __init__(self, args, container_id, user=None):  # type: (EnvironmentConfig,

     def run(self,
             command,  # type: t.List[str]
-            capture=False,  # type: bool
+            capture,  # type: bool
+            interactive=False,  # type: bool
             data=None,  # type: t.Optional[str]
             stdin=None,  # type: t.Optional[t.IO[bytes]]
             stdout=None,  # type: t.Optional[t.IO[bytes]]
+            output_stream=None,  # type: t.Optional[OutputStream]
     ):  # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
         """Run the specified command and return the result."""
         options = []

         if self.user:
             options.extend(['--user', self.user])

-        if not data and not stdin and not stdout and sys.stdin.isatty():
+        if interactive:
             options.append('-it')

         return docker_exec(
@@ -235,6 +245,8 @@ def run(self,
             data=data,
             stdin=stdin,
             stdout=stdout,
+            interactive=interactive,
+            output_stream=output_stream,
         )

     def inspect(self):  # type: () -> DockerInspect
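Making `capture` required and adding an explicit `interactive` flag removes the old heuristic that inferred interactivity from `sys.stdin.isatty()` inside each connection type. The TTY-allocation decision reduces to the flag, roughly:

```python
import typing as t


def ssh_tty_options(interactive: bool) -> t.List[str]:
    # Callers now state intent explicitly instead of each connection sniffing stdin.
    return ['-tt'] if interactive else []


assert ssh_tty_options(True) == ['-tt']
assert ssh_tty_options(False) == []
```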
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
index cac7240872e84f..609e3cfcfdd3d6 100644
--- a/test/lib/ansible_test/_internal/constants.py
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -6,6 +6,8 @@
     REMOTE_ONLY_PYTHON_VERSIONS,
 )

+STATUS_HOST_CONNECTION_ERROR = 4
+
 # Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
 # This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
 SOFT_RLIMIT_NOFILE = 1024
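`STATUS_HOST_CONNECTION_ERROR` gives callers a distinct exit status for connection failures. A hypothetical CI wrapper could branch on it, assuming ansible-test exits with this status when a host connection error occurs:

```python
import subprocess

STATUS_HOST_CONNECTION_ERROR = 4  # mirrors the constant added above

proc = subprocess.run(['ansible-test', 'integration', '--venv', 'ping'], check=False)  # illustrative invocation

if proc.returncode == STATUS_HOST_CONNECTION_ERROR:
    print('Host connection failed. Retrying provisioning may help.')
```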
Waiting a few seconds before trying again.') + docker_rm(args, name) # podman doesn't remove containers after create if run fails + time.sleep(3) + else: + if args.explain: + stdout = ''.join(random.choice('0123456789abcdef') for _iteration in range(64)) + + return stdout.strip() + + raise ApplicationError(f'Failed to run docker image "{image}".') + + +def start_container(args: EnvironmentConfig, container_id: str) -> tuple[t.Optional[str], t.Optional[str]]: + """Start a docker container by name or ID.""" + options: list[str] = [] + + for _iteration in range(1, 3): + try: + return docker_start(args, container_id, options) + except SubprocessError as ex: + display.error(ex.message) + display.warning(f'Failed to start docker container "{container_id}". Waiting a few seconds before trying again.') + time.sleep(3) + + raise ApplicationError(f'Failed to start docker container "{container_id}".') + + +def get_container_ip_address(args: EnvironmentConfig, container: DockerInspect) -> t.Optional[str]: + """Return the IP address of the container for the preferred docker network.""" + if container.networks: + network_name = get_docker_preferred_network_name(args) + + if not network_name: + # Sort networks and use the first available. + # This assumes all containers will have access to the same networks. + network_name = sorted(container.networks.keys()).pop(0) + + ipaddress = container.networks[network_name]['IPAddress'] + else: + ipaddress = container.network_settings['IPAddress'] + + if not ipaddress: + return None + + return ipaddress + + +@mutex +def get_docker_preferred_network_name(args: EnvironmentConfig) -> t.Optional[str]: + """ + Return the preferred network name for use with Docker. The selection logic is: + - the network selected by the user with `--docker-network` + - the network of the currently running docker container (if any) + - the default docker network (returns None) + """ + try: + return get_docker_preferred_network_name.network # type: ignore[attr-defined] + except AttributeError: + pass + + network = None + + if args.docker_network: + network = args.docker_network + else: + current_container_id = get_docker_container_id() + + if current_container_id: + # Make sure any additional containers we launch use the same network as the current container we're running in. + # This is needed when ansible-test is running in a container that is not connected to Docker's default network. + container = docker_inspect(args, current_container_id, always=True) + network = container.get_network_name() + + # The default docker behavior puts containers on the same network. + # The default podman behavior puts containers on isolated networks which don't allow communication between containers or network disconnect. + # Starting with podman version 2.1.0 rootless containers are able to join networks. + # Starting with podman version 2.2.0 containers can be disconnected from networks. + # To maintain feature parity with docker, detect and use the default "podman" network when running under podman. 
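run_container() and start_container() above share one retry shape: attempt the operation a fixed number of times, log and sleep between failures, then raise. A generic sketch of that pattern, assuming the SubprocessError, ApplicationError and display utilities imported at the top of this module (the helper itself is illustrative, not part of this diff):

    import time
    import typing as t

    def run_with_retries(operation: t.Callable[[], str], attempts: int = 2, delay: float = 3.0) -> str:
        """Invoke operation(), retrying on SubprocessError after a short delay (illustrative)."""
        for _attempt in range(attempts):
            try:
                return operation()
            except SubprocessError as ex:
                display.error(ex.message)
                time.sleep(delay)

        raise ApplicationError('Operation failed after retries.')
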
+ if network is None and require_docker().command == 'podman' and docker_network_inspect(args, 'podman', always=True): + network = 'podman' + + get_docker_preferred_network_name.network = network # type: ignore[attr-defined] + + return network + + +def is_docker_user_defined_network(network: str) -> bool: + """Return True if the network being used is a user-defined network.""" + return bool(network) and network != 'bridge' + + +@mutex def get_container_database(args): # type: (EnvironmentConfig) -> ContainerDatabase """Return the current container database, creating it as needed, or returning the one provided on the command line through delegation.""" try: - return get_container_database.database + return get_container_database.database # type: ignore[attr-defined] except AttributeError: pass @@ -236,9 +370,9 @@ def get_container_database(args): # type: (EnvironmentConfig) -> ContainerDatab display.info('>>> Container Database\n%s' % json.dumps(database.to_dict(), indent=4, sort_keys=True), verbosity=3) - get_container_database.database = database + get_container_database.database = database # type: ignore[attr-defined] - return get_container_database.database + return database class ContainerAccess: @@ -286,7 +420,7 @@ def from_dict(data): # type: (t.Dict[str, t.Any]) -> ContainerAccess def to_dict(self): # type: () -> t.Dict[str, t.Any] """Return a dict of the current instance.""" - value = dict( + value: t.Dict[str, t.Any] = dict( host_ip=self.host_ip, names=self.names, ) @@ -350,8 +484,12 @@ def create_container_database(args): # type: (EnvironmentConfig) -> ContainerDa for name, container in support_containers.items(): if container.details.published_ports: + if require_docker().command == 'podman': + host_ip_func = get_podman_host_ip + else: + host_ip_func = get_docker_host_ip published_access = ContainerAccess( - host_ip=get_docker_host_ip(), + host_ip=host_ip_func(), names=container.aliases, ports=None, forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()), @@ -370,7 +508,7 @@ def create_container_database(args): # type: (EnvironmentConfig) -> ContainerDa elif require_docker().command == 'podman': # published ports for rootless podman containers should be accessible from the host's IP container_access = ContainerAccess( - host_ip=get_host_ip(), + host_ip=get_podman_host_ip(), names=container.aliases, ports=None, forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()), @@ -457,7 +595,7 @@ def close(self): # type: () -> None def support_container_context( args, # type: EnvironmentConfig ssh, # type: t.Optional[SshConnectionDetail] -): # type: (...) -> t.Optional[ContainerDatabase] +): # type: (...) -> t.Iterator[t.Optional[ContainerDatabase]] """Create a context manager for integration tests that use support containers.""" if not isinstance(args, (IntegrationConfig, UnitsConfig, SanityConfig, ShellConfig)): yield None # containers are only needed for commands that have targets (hosts or pythons) @@ -514,7 +652,7 @@ def create_support_container_context( try: port_forwards = process.collect_port_forwards() - contexts = {} + contexts = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]] for forward, forwarded_port in port_forwards.items(): access_host, access_port = forward @@ -567,7 +705,7 @@ def __init__(self, def start(self, args): # type: (EnvironmentConfig) -> None """Start the container. 
Used for containers which are created, but not started.""" - docker_start(args, self.name) + start_container(args, self.name) self.register(args) @@ -577,7 +715,7 @@ def register(self, args): # type: (EnvironmentConfig) -> SupportContainer raise Exception('Container already registered: %s' % self.name) try: - container = docker_inspect(args, self.container_id) + container = docker_inspect(args, self.name) except ContainerNotFoundError: if not args.explain: raise @@ -594,7 +732,7 @@ def register(self, args): # type: (EnvironmentConfig) -> SupportContainer ), )) - support_container_ip = container.get_ip_address() + support_container_ip = get_container_ip_address(args, container) if self.publish_ports: # inspect the support container to locate the published ports @@ -659,7 +797,7 @@ def cleanup_containers(args): # type: (EnvironmentConfig) -> None if container.cleanup == CleanupMode.YES: docker_rm(args, container.container_id) elif container.cleanup == CleanupMode.INFO: - display.notice('Remember to run `docker rm -f %s` when finished testing.' % container.name) + display.notice(f'Remember to run `{require_docker().command} rm -f {container.name}` when finished testing.') def create_hosts_entries(context): # type: (t.Dict[str, ContainerAccess]) -> t.List[str] @@ -702,8 +840,8 @@ def create_container_hooks( else: managed_type = 'posix' - control_state = {} - managed_state = {} + control_state = {} # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]] + managed_state = {} # type: t.Dict[str, t.Tuple[t.List[str], t.List[SshProcess]]] def pre_target(target): """Configure hosts for SSH port forwarding required by the specified target.""" @@ -722,7 +860,7 @@ def post_target(target): def create_managed_contexts(control_contexts): # type: (t.Dict[str, t.Dict[str, ContainerAccess]]) -> t.Dict[str, t.Dict[str, ContainerAccess]] """Create managed contexts from the given control contexts.""" - managed_contexts = {} + managed_contexts = {} # type: t.Dict[str, t.Dict[str, ContainerAccess]] for context_name, control_context in control_contexts.items(): managed_context = managed_contexts[context_name] = {} @@ -789,8 +927,8 @@ def forward_ssh_ports( hosts_entries = create_hosts_entries(test_context) inventory = generate_ssh_inventory(ssh_connections) - with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: - run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries)) + with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str + run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries)) ssh_processes = [] # type: t.List[SshProcess] @@ -822,8 +960,8 @@ def cleanup_ssh_ports( inventory = generate_ssh_inventory(ssh_connections) - with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: - run_playbook(args, inventory_path, playbook, dict(hosts_entries=hosts_entries)) + with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str + run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries)) if ssh_processes: for process in ssh_processes: diff --git a/test/lib/ansible_test/_internal/content_config.py b/test/lib/ansible_test/_internal/content_config.py index 10574cc0b6a97b..39a8d4125cad3c 100644 --- a/test/lib/ansible_test/_internal/content_config.py +++ b/test/lib/ansible_test/_internal/content_config.py @@ -2,6 +2,7 @@ from 
__future__ import annotations import os +import pickle import typing as t from .constants import ( @@ -21,6 +22,7 @@ ) from .io import ( + open_binary_file, read_text_file, ) @@ -28,54 +30,59 @@ ApplicationError, display, str_to_version, - cache, ) from .data import ( data_context, ) +from .config import ( + EnvironmentConfig, + ContentConfig, + ModulesConfig, +) MISSING = object() -class BaseConfig: - """Base class for content configuration.""" - def __init__(self, data): # type: (t.Any) -> None - if not isinstance(data, dict): - raise Exception('config must be type `dict` not `%s`' % type(data)) - +def parse_modules_config(data: t.Any) -> ModulesConfig: + """Parse the given dictionary as module config and return it.""" + if not isinstance(data, dict): + raise Exception('config must be type `dict` not `%s`' % type(data)) -class ModulesConfig(BaseConfig): - """Configuration for modules.""" - def __init__(self, data): # type: (t.Any) -> None - super().__init__(data) + python_requires = data.get('python_requires', MISSING) - python_requires = data.get('python_requires', MISSING) + if python_requires == MISSING: + raise KeyError('python_requires is required') - if python_requires == MISSING: - raise KeyError('python_requires is required') + return ModulesConfig( + python_requires=python_requires, + python_versions=parse_python_requires(python_requires), + controller_only=python_requires == 'controller', + ) - self.python_requires = python_requires - self.python_versions = parse_python_requires(python_requires) - self.controller_only = python_requires == 'controller' +def parse_content_config(data: t.Any) -> ContentConfig: + """Parse the given dictionary as content config and return it.""" + if not isinstance(data, dict): + raise Exception('config must be type `dict` not `%s`' % type(data)) -class ContentConfig(BaseConfig): - """Configuration for all content.""" - def __init__(self, data): # type: (t.Any) -> None - super().__init__(data) + # Configuration specific to modules/module_utils. + modules = parse_modules_config(data.get('modules', {})) - # Configuration specific to modules/module_utils. - self.modules = ModulesConfig(data.get('modules', {})) + # Python versions supported by the controller, combined with Python versions supported by modules/module_utils. + # Mainly used for display purposes and to limit the Python versions used for sanity tests. + python_versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS + if version in CONTROLLER_PYTHON_VERSIONS or version in modules.python_versions) - # Python versions supported by the controller, combined with Python versions supported by modules/module_utils. - # Mainly used for display purposes and to limit the Python versions used for sanity tests. - self.python_versions = [version for version in SUPPORTED_PYTHON_VERSIONS - if version in CONTROLLER_PYTHON_VERSIONS or version in self.modules.python_versions] + # True if Python 2.x is supported. + py2_support = any(version for version in python_versions if str_to_version(version)[0] == 2) - # True if Python 2.x is supported. 
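The python_requires values handled in this module are PEP 440 version specifiers. A standalone illustration of the matching performed by parse_python_requires further down, using the public packaging library (the version tuple here is an example, not the real SUPPORTED_PYTHON_VERSIONS):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    supported = ('2.7', '3.8', '3.9', '3.10')
    spec = SpecifierSet('>=3.8')

    # Keep only the supported versions that satisfy the specifier.
    print(tuple(v for v in supported if spec.contains(Version(v))))  # ('3.8', '3.9', '3.10')
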
- self.py2_support = any(version for version in self.python_versions if str_to_version(version)[0] == 2) + return ContentConfig( + modules=modules, + python_versions=python_versions, + py2_support=py2_support, + ) def load_config(path): # type: (str) -> t.Optional[ContentConfig] @@ -95,7 +102,7 @@ def load_config(path): # type: (str) -> t.Optional[ContentConfig] return None try: - config = ContentConfig(yaml_value) + config = parse_content_config(yaml_value) except Exception as ex: # pylint: disable=broad-except display.warning('Ignoring config "%s" due to a config parsing error: %s' % (path, ex)) return None @@ -105,13 +112,18 @@ def load_config(path): # type: (str) -> t.Optional[ContentConfig] return config -@cache -def get_content_config(): # type: () -> ContentConfig +def get_content_config(args): # type: (EnvironmentConfig) -> ContentConfig """ Parse and return the content configuration (if any) for the current collection. For ansible-core, a default configuration is used. Results are cached. """ + if args.host_path: + args.content_config = deserialize_content_config(os.path.join(args.host_path, 'config.dat')) + + if args.content_config: + return args.content_config + collection_config_path = 'tests/config.yml' config = None @@ -120,7 +132,7 @@ def get_content_config(): # type: () -> ContentConfig config = load_config(collection_config_path) if not config: - config = ContentConfig(dict( + config = parse_content_config(dict( modules=dict( python_requires='default', ), @@ -132,20 +144,36 @@ def get_content_config(): # type: () -> ContentConfig 'This collection provides the Python requirement: %s' % ( ', '.join(SUPPORTED_PYTHON_VERSIONS), config.modules.python_requires)) + args.content_config = config + return config -def parse_python_requires(value): # type: (t.Any) -> t.List[str] +def parse_python_requires(value): # type: (t.Any) -> tuple[str, ...] """Parse the given 'python_requires' version specifier and return the matching Python versions.""" if not isinstance(value, str): raise ValueError('python_requires must be of type `str` not type `%s`' % type(value)) + versions: tuple[str, ...] + if value == 'default': - versions = list(SUPPORTED_PYTHON_VERSIONS) + versions = SUPPORTED_PYTHON_VERSIONS elif value == 'controller': - versions = list(CONTROLLER_PYTHON_VERSIONS) + versions = CONTROLLER_PYTHON_VERSIONS else: specifier_set = SpecifierSet(value) - versions = [version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))] + versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))) return versions + + +def serialize_content_config(args: EnvironmentConfig, path: str) -> None: + """Serialize the content config to the given path. 
If the config has not been loaded, an empty config will be serialized.""" + with open_binary_file(path, 'wb') as config_file: + pickle.dump(args.content_config, config_file) + + +def deserialize_content_config(path: str) -> ContentConfig: + """Deserialize content config from the path.""" + with open_binary_file(path) as config_file: + return pickle.load(config_file) diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py index 023b5655aa87b5..62d063b2b7eb7a 100644 --- a/test/lib/ansible_test/_internal/core_ci.py +++ b/test/lib/ansible_test/_internal/core_ci.py @@ -1,6 +1,8 @@ """Access Ansible Core CI remote services.""" from __future__ import annotations +import abc +import dataclasses import json import os import re @@ -48,6 +50,65 @@ ) +@dataclasses.dataclass(frozen=True) +class Resource(metaclass=abc.ABCMeta): + """Base class for Ansible Core CI resources.""" + @abc.abstractmethod + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + + @abc.abstractmethod + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + + @property + @abc.abstractmethod + def persist(self) -> bool: + """True if the resource is persistent, otherwise false.""" + + +@dataclasses.dataclass(frozen=True) +class VmResource(Resource): + """Details needed to request a VM from Ansible Core CI.""" + platform: str + version: str + architecture: str + provider: str + tag: str + + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + return self.platform, self.version, self.architecture, self.provider + + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + return f'{self.platform} {self.version} ({self.architecture}) [{self.tag}] @{self.provider}' + + @property + def persist(self) -> bool: + """True if the resource is persistent, otherwise false.""" + return True + + +@dataclasses.dataclass(frozen=True) +class CloudResource(Resource): + """Details needed to request cloud credentials from Ansible Core CI.""" + platform: str + + def as_tuple(self) -> t.Tuple[str, str, str, str]: + """Return the resource as a tuple of platform, version, architecture and provider.""" + return self.platform, '', '', self.platform + + def get_label(self) -> str: + """Return a user-friendly label for this resource.""" + return self.platform + + @property + def persist(self) -> bool: + """True if the resource is persistent, otherwise false.""" + return False + + class AnsibleCoreCI: """Client for Ansible Core CI services.""" DEFAULT_ENDPOINT = 'https://ansible-core-ci.testing.ansible.com' @@ -55,16 +116,12 @@ class AnsibleCoreCI: def __init__( self, args, # type: EnvironmentConfig - platform, # type: str - version, # type: str - provider, # type: str - persist=True, # type: bool + resource, # type: Resource load=True, # type: bool - suffix=None, # type: t.Optional[str] ): # type: (...) 
-> None self.args = args - self.platform = platform - self.version = version + self.resource = resource + self.platform, self.version, self.arch, self.provider = self.resource.as_tuple() self.stage = args.remote_stage self.client = HttpClient(args) self.connection = None @@ -73,41 +130,39 @@ def __init__( self.default_endpoint = args.remote_endpoint or self.DEFAULT_ENDPOINT self.retries = 3 self.ci_provider = get_ci_provider() - self.provider = provider - self.name = '%s-%s' % (self.platform, self.version) + self.label = self.resource.get_label() - if suffix: - self.name += '-' + suffix + stripped_label = re.sub('[^A-Za-z0-9_.]+', '-', self.label).strip('-') - self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage)) + self.name = f"{stripped_label}-{self.stage}" # turn the label into something suitable for use as a filename + + self.path = os.path.expanduser(f'~/.ansible/test/instances/{self.name}') self.ssh_key = SshKey(args) - if persist and load and self._load(): + if self.resource.persist and load and self._load(): try: - display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Checking existing {self.label} instance using: {self._uri}', verbosity=1) self.connection = self.get(always_raise_on=[404]) - display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + display.info(f'Loaded existing {self.label} instance.', verbosity=1) except HttpError as ex: if ex.status != 404: raise self._clear() - display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Cleared stale {self.label} instance.', verbosity=1) self.instance_id = None self.endpoint = None - elif not persist: + elif not self.resource.persist: self.instance_id = None self.endpoint = None self._clear() if self.instance_id: - self.started = True + self.started = True # type: bool else: self.started = False self.instance_id = str(uuid.uuid4()) @@ -126,8 +181,7 @@ def available(self): def start(self): """Start instance.""" if self.started: - display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping started {self.label} instance.', verbosity=1) return None return self._start(self.ci_provider.prepare_core_ci_auth()) @@ -135,22 +189,19 @@ def start(self): def stop(self): """Stop instance.""" if not self.started: - display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping invalid {self.label} instance.', verbosity=1) return response = self.client.delete(self._uri) if response.status_code == 404: self._clear() - display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Cleared invalid {self.label} instance.', verbosity=1) return if response.status_code == 200: self._clear() - display.info('Stopped running %s/%s instance %s.' 
% (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Stopped running {self.label} instance.', verbosity=1) return raise self._create_http_error(response) @@ -158,8 +209,7 @@ def stop(self): def get(self, tries=3, sleep=15, always_raise_on=None): # type: (int, int, t.Optional[t.List[int]]) -> t.Optional[InstanceConnection] """Get instance connection information.""" if not self.started: - display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id), - verbosity=1) + display.info(f'Skipping invalid {self.label} instance.', verbosity=1) return None if not always_raise_on: @@ -180,7 +230,7 @@ def get(self, tries=3, sleep=15, always_raise_on=None): # type: (int, int, t.Op if not tries or response.status_code in always_raise_on: raise error - display.warning('%s. Trying again after %d seconds.' % (error, sleep)) + display.warning(f'{error}. Trying again after {sleep} seconds.') time.sleep(sleep) if self.args.explain: @@ -216,9 +266,7 @@ def get(self, tries=3, sleep=15, always_raise_on=None): # type: (int, int, t.Op status = 'running' if self.connection.running else 'starting' - display.info('Status update: %s/%s on instance %s is %s.' % - (self.platform, self.version, self.instance_id, status), - verbosity=1) + display.info(f'The {self.label} instance is {status}.', verbosity=1) return self.connection @@ -229,16 +277,15 @@ def wait(self, iterations=90): # type: (t.Optional[int]) -> None return time.sleep(10) - raise ApplicationError('Timeout waiting for %s/%s instance %s.' % - (self.platform, self.version, self.instance_id)) + raise ApplicationError(f'Timeout waiting for {self.label} instance.') @property def _uri(self): - return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id) + return f'{self.endpoint}/{self.stage}/{self.provider}/{self.instance_id}' def _start(self, auth): """Start instance.""" - display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1) + display.info(f'Initializing new {self.label} instance using: {self._uri}', verbosity=1) if self.platform == 'windows': winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1')) @@ -249,6 +296,7 @@ def _start(self, auth): config=dict( platform=self.platform, version=self.version, + architecture=self.arch, public_key=self.ssh_key.pub_contents, query=False, winrm_config=winrm_config, @@ -266,7 +314,7 @@ def _start(self, auth): self.started = True self._save() - display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1) + display.info(f'Started {self.label} instance.', verbosity=1) if self.args.explain: return {} @@ -277,8 +325,6 @@ def _start_endpoint(self, data, headers): # type: (t.Dict[str, t.Any], t.Dict[s tries = self.retries sleep = 15 - display.info('Trying endpoint: %s' % self.endpoint, verbosity=1) - while True: tries -= 1 response = self.client.put(self._uri, data=json.dumps(data), headers=headers) @@ -294,7 +340,7 @@ def _start_endpoint(self, data, headers): # type: (t.Dict[str, t.Any], t.Dict[s if not tries: raise error - display.warning('%s. Trying again after %d seconds.' % (error, sleep)) + display.warning(f'{error}. 
Trying again after {sleep} seconds.') time.sleep(sleep) def _clear(self): @@ -345,14 +391,14 @@ def _save(self): # type: () -> None def save(self): # type: () -> t.Dict[str, str] """Save instance details and return as a dictionary.""" return dict( - platform_version='%s/%s' % (self.platform, self.version), + label=self.resource.get_label(), instance_id=self.instance_id, endpoint=self.endpoint, ) @staticmethod def _create_http_error(response): # type: (HttpResponse) -> ApplicationError - """Return an exception created from the given HTTP resposne.""" + """Return an exception created from the given HTTP response.""" response_json = response.json() stack_trace = '' @@ -369,7 +415,7 @@ def _create_http_error(response): # type: (HttpResponse) -> ApplicationError traceback_lines = traceback.format_list(traceback_lines) trace = '\n'.join([x.rstrip() for x in traceback_lines]) - stack_trace = ('\nTraceback (from remote server):\n%s' % trace) + stack_trace = f'\nTraceback (from remote server):\n{trace}' else: message = str(response_json) @@ -379,7 +425,7 @@ def _create_http_error(response): # type: (HttpResponse) -> ApplicationError class CoreHttpError(HttpError): """HTTP response as an error.""" def __init__(self, status, remote_message, remote_stack_trace): # type: (int, str, str) -> None - super().__init__(status, '%s%s' % (remote_message, remote_stack_trace)) + super().__init__(status, f'{remote_message}{remote_stack_trace}') self.remote_message = remote_message self.remote_stack_trace = remote_stack_trace @@ -388,8 +434,8 @@ def __init__(self, status, remote_message, remote_stack_trace): # type: (int, s class SshKey: """Container for SSH key used to connect to remote instances.""" KEY_TYPE = 'rsa' # RSA is used to maintain compatibility with paramiko and EC2 - KEY_NAME = 'id_%s' % KEY_TYPE - PUB_NAME = '%s.pub' % KEY_NAME + KEY_NAME = f'id_{KEY_TYPE}' + PUB_NAME = f'{KEY_NAME}.pub' @mutex def __init__(self, args): # type: (EnvironmentConfig) -> None @@ -469,7 +515,7 @@ def generate_key_pair(self, args): # type: (EnvironmentConfig) -> t.Tuple[str, make_dirs(os.path.dirname(key)) if not os.path.isfile(key) or not os.path.isfile(pub): - run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', self.KEY_TYPE, '-N', '', '-f', key]) + run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', self.KEY_TYPE, '-N', '', '-f', key], capture=True) if args.explain: return key, pub @@ -502,6 +548,6 @@ def __init__(self, def __str__(self): if self.password: - return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password) + return f'{self.hostname}:{self.port} [{self.username}:{self.password}]' - return '%s:%s [%s]' % (self.hostname, self.port, self.username) + return f'{self.hostname}:{self.port} [{self.username}]' diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py index e705db76e0dd74..869a3a3a72d894 100644 --- a/test/lib/ansible_test/_internal/coverage_util.py +++ b/test/lib/ansible_test/_internal/coverage_util.py @@ -41,6 +41,10 @@ PythonConfig, ) +from .thread import ( + mutex, +) + def cover_python( args, # type: TestConfig @@ -48,7 +52,7 @@ def cover_python( cmd, # type: t.List[str] target_name, # type: str env, # type: t.Dict[str, str] - capture=False, # type: bool + capture, # type: bool data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] ): # type: (...) 
-> t.Tuple[t.Optional[str], t.Optional[str]] @@ -107,10 +111,11 @@ def get_coverage_environment( return env +@mutex def get_coverage_config(args): # type: (TestConfig) -> str """Return the path to the coverage config, creating the config if it does not already exist.""" try: - return get_coverage_config.path + return get_coverage_config.path # type: ignore[attr-defined] except AttributeError: pass @@ -122,11 +127,13 @@ def get_coverage_config(args): # type: (TestConfig) -> str temp_dir = tempfile.mkdtemp() atexit.register(lambda: remove_tree(temp_dir)) - path = get_coverage_config.path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME) + path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME) if not args.explain: write_text_file(path, coverage_config) + get_coverage_config.path = path # type: ignore[attr-defined] + return path diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py index c3b2187ca24463..42fa5a2ac791f8 100644 --- a/test/lib/ansible_test/_internal/data.py +++ b/test/lib/ansible_test/_internal/data.py @@ -9,6 +9,7 @@ ApplicationError, import_plugins, is_subdir, + is_valid_identifier, ANSIBLE_LIB_ROOT, ANSIBLE_TEST_ROOT, ANSIBLE_SOURCE_ROOT, @@ -34,11 +35,19 @@ InstalledSource, ) +from .provider.source.unsupported import ( + UnsupportedSource, +) + from .provider.layout import ( ContentLayout, LayoutProvider, ) +from .provider.layout.unsupported import ( + UnsupportedLayout, +) + class DataContext: """Data context providing details about the current execution environment for ansible-test.""" @@ -109,14 +118,20 @@ def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvi walk, # type: bool ): # type: (...) -> ContentLayout """Create a content layout using the given providers and root path.""" - layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk) + try: + layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk) + except ProviderNotFoundForPath: + layout_provider = UnsupportedLayout(root) try: # Begin the search for the source provider at the layout provider root. # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error. # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project. # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control. 
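get_coverage_config() above uses a caching idiom that recurs throughout this PR: the computed value is stored as an attribute on the function object, and the new @mutex decorator serializes the first computation so concurrent callers cannot race. A self-contained sketch of the idiom, with a plain lock standing in for @mutex and a stand-in computation:

    import threading

    _lock = threading.Lock()  # stand-in for the @mutex decorator

    def get_config_path() -> str:
        """Compute a value once, cache it on the function object, then return the cached value (illustrative)."""
        with _lock:
            try:
                return get_config_path.path  # type: ignore[attr-defined]
            except AttributeError:
                pass

            path = '/tmp/example-config'  # stand-in for the real computation
            get_config_path.path = path  # type: ignore[attr-defined]

        return path
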
- source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk) + if isinstance(layout_provider, UnsupportedLayout): + source_provider = UnsupportedSource(layout_provider.root) # type: SourceProvider + else: + source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk) except ProviderNotFoundForPath: source_provider = UnversionedSource(layout_provider.root) @@ -161,6 +176,48 @@ def register_payload_callback(self, callback): # type: (t.Callable[[t.List[t.Tu """Register the given payload callback.""" self.payload_callbacks.append(callback) + def check_layout(self) -> None: + """Report an error if the layout is unsupported.""" + if self.content.unsupported: + raise ApplicationError(self.explain_working_directory()) + + def explain_working_directory(self) -> str: + """Return a message explaining the working directory requirements.""" + blocks = [ + 'The current working directory must be within the source tree being tested.', + '', + ] + + if ANSIBLE_SOURCE_ROOT: + blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/') + blocks.append('') + + cwd = os.getcwd() + + blocks.append('Testing an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/') + blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/') + blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/') + blocks.append('') + blocks.append(f'Current working directory: {cwd}/') + + if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections': + blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/') + elif os.path.basename(cwd) == 'ansible_collections': + blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/') + elif 'ansible_collections' not in cwd.split(os.path.sep): + blocks.append('No "ansible_collections" parent directory was found.') + + if self.content.collection: + if not is_valid_identifier(self.content.collection.namespace): + blocks.append(f'The namespace "{self.content.collection.namespace}" is an invalid identifier or a reserved keyword.') + + if not is_valid_identifier(self.content.collection.name): + blocks.append(f'The name "{self.content.collection.name}" is an invalid identifier or a reserved keyword.') + + message = '\n'.join(blocks) + + return message + @cache def data_context(): # type: () -> DataContext @@ -173,21 +230,7 @@ def data_context(): # type: () -> DataContext for provider_type in provider_types: import_plugins('provider/%s' % provider_type) - try: - context = DataContext() - except ProviderNotFoundForPath: - options = [ - ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/', - ] - - if ANSIBLE_SOURCE_ROOT: - options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT) - - raise ApplicationError('''The current working directory must be at or below: - -%s - -Current working directory: %s''' % ('\n'.join(options), os.getcwd())) + context = DataContext() return context diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py index a5c404d9bc4c46..b3b8ad51dc59e7 100644 --- a/test/lib/ansible_test/_internal/delegation.py +++ b/test/lib/ansible_test/_internal/delegation.py @@ -7,11 +7,16 @@ import tempfile import typing as t +from .constants import ( + STATUS_HOST_CONNECTION_ERROR, +) + from .io import ( make_dirs, ) from .config import ( + CommonConfig, EnvironmentConfig, 
IntegrationConfig, ShellConfig, @@ -26,6 +31,7 @@ ANSIBLE_BIN_PATH, ANSIBLE_LIB_ROOT, ANSIBLE_TEST_ROOT, + OutputStream, ) from .util_common import ( @@ -35,6 +41,7 @@ from .containers import ( support_container_context, + ContainerDatabase, ) from .data import ( @@ -65,9 +72,13 @@ HostState, ) +from .content_config import ( + serialize_content_config, +) + @contextlib.contextmanager -def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState) -> None +def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState) -> t.Iterator[None] """Context manager for serialized host state during delegation.""" make_dirs(ResultType.TMP.path) @@ -78,6 +89,7 @@ def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState with tempfile.TemporaryDirectory(prefix='host-', dir=ResultType.TMP.path) as host_dir: args.host_settings.serialize(os.path.join(host_dir, 'settings.dat')) host_state.serialize(os.path.join(host_dir, 'state.dat')) + serialize_content_config(args, os.path.join(host_dir, 'config.dat')) args.host_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(host_dir)) @@ -87,8 +99,10 @@ def delegation_context(args, host_state): # type: (EnvironmentConfig, HostState args.host_path = None -def delegate(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None +def delegate(args, host_state, exclude, require): # type: (CommonConfig, HostState, t.List[str], t.List[str]) -> None """Delegate execution of ansible-test to another environment.""" + assert isinstance(args, EnvironmentConfig) + with delegation_context(args, host_state): if isinstance(args, TestConfig): args.metadata.ci_provider = get_ci_provider().code @@ -141,7 +155,7 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC if not args.allow_destructive: options.append('--allow-destructive') - with support_container_context(args, ssh) as containers: + with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) @@ -155,19 +169,27 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC os.path.join(content_root, ResultType.COVERAGE.relative_path), ] - con.run(['mkdir', '-p'] + writable_dirs) - con.run(['chmod', '777'] + writable_dirs) - con.run(['chmod', '755', working_directory]) - con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) - con.run(['useradd', pytest_user, '--create-home']) - con.run(insert_options(command, options + ['--requirements-mode', 'only'])) + con.run(['mkdir', '-p'] + writable_dirs, capture=True) + con.run(['chmod', '777'] + writable_dirs, capture=True) + con.run(['chmod', '755', working_directory], capture=True) + con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)], capture=True) + con.run(['useradd', pytest_user, '--create-home'], capture=True) + + con.run(insert_options(command, options + ['--requirements-mode', 'only']), capture=False) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: - con.disconnect_network(network) + try: + con.disconnect_network(network) + except SubprocessError: + display.warning( + 'Unable to disconnect network "%s" (this is normal under podman). ' + 'Tests will not be isolated from the network. Network-related tests may ' + 'misbehave.' 
% (network,) + ) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') @@ -177,14 +199,27 @@ def delegate_command(args, host_state, exclude, require): # type: (EnvironmentC con.user = pytest_user success = False + status = 0 try: - con.run(insert_options(command, options)) + # When delegating, preserve the original separate stdout/stderr streams, but only when the following conditions are met: + # 1) Display output is being sent to stderr. This indicates the output on stdout must be kept separate from stderr. + # 2) The delegation is non-interactive. Interactive mode, which generally uses a TTY, is not compatible with intercepting stdout/stderr. + # The downside to having separate streams is that individual lines of output from each are more likely to appear out-of-order. + output_stream = OutputStream.ORIGINAL if args.display_stderr and not args.interactive else None + con.run(insert_options(command, options), capture=False, interactive=args.interactive, output_stream=output_stream) success = True + except SubprocessError as ex: + status = ex.status + raise finally: if host_delegation: download_results(args, con, content_root, success) + if not success and status == STATUS_HOST_CONNECTION_ERROR: + for target in host_state.target_profiles: + target.on_target_failure() # when the controller is delegated, report failures after delegation fails + def insert_options(command, options): """Insert addition command line options into the given command and return the result.""" diff --git a/test/lib/ansible_test/_internal/dev/__init__.py b/test/lib/ansible_test/_internal/dev/__init__.py new file mode 100644 index 00000000000000..e7c9b7d54f9e43 --- /dev/null +++ b/test/lib/ansible_test/_internal/dev/__init__.py @@ -0,0 +1,2 @@ +"""Development and testing support code. 
Enabled through the use of `--dev-*` command line options.""" +from __future__ import annotations diff --git a/test/lib/ansible_test/_internal/dev/container_probe.py b/test/lib/ansible_test/_internal/dev/container_probe.py new file mode 100644 index 00000000000000..efce383d0f7d0e --- /dev/null +++ b/test/lib/ansible_test/_internal/dev/container_probe.py @@ -0,0 +1,216 @@ +"""Diagnostic utilities to probe container cgroup behavior during development and testing (both manual and integration).""" +from __future__ import annotations + +import dataclasses +import enum +import json +import os +import pathlib +import pwd +import typing as t + +from ..io import ( + read_text_file, + write_text_file, +) + +from ..util import ( + display, + ANSIBLE_TEST_TARGET_ROOT, +) + +from ..config import ( + EnvironmentConfig, +) + +from ..docker_util import ( + LOGINUID_NOT_SET, + docker_exec, + get_docker_info, + get_podman_remote, + require_docker, +) + +from ..host_configs import ( + DockerConfig, +) + +from ..cgroup import ( + CGroupEntry, + CGroupPath, + MountEntry, + MountType, +) + + +class CGroupState(enum.Enum): + """The expected state of a cgroup related mount point.""" + HOST = enum.auto() + PRIVATE = enum.auto() + SHADOWED = enum.auto() + + +@dataclasses.dataclass(frozen=True) +class CGroupMount: + """Details on a cgroup mount point that is expected to be present in the container.""" + path: str + type: t.Optional[str] + writable: t.Optional[bool] + state: t.Optional[CGroupState] + + def __post_init__(self): + assert is_relative_to(pathlib.PurePosixPath(self.path), CGroupPath.ROOT) + + if self.type is None: + assert self.state is None + elif self.type == MountType.TMPFS: + assert self.writable is True + assert self.state is None + else: + assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2) + assert self.state is not None + + +def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> None: + """Check the running container to examine the state of the cgroup hierarchies.""" + cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo'] + + stdout = docker_exec(args, container_name, cmd, capture=True)[0] + cgroups_stdout, mounts_stdout = stdout.split('\n\n') + + cgroups = CGroupEntry.loads(cgroups_stdout) + mounts = MountEntry.loads(mounts_stdout) + + mounts = tuple(mount for mount in mounts if is_relative_to(mount.path, CGroupPath.ROOT)) + + mount_cgroups: dict[MountEntry, CGroupEntry] = {} + probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {} + + for cgroup in cgroups: + if cgroup.subsystem: + mount = ([mount for mount in mounts if + mount.type == MountType.CGROUP_V1 and + is_relative_to(mount.path, cgroup.root_path) and + is_relative_to(cgroup.full_path, mount.path) + ] or [None])[-1] + else: + mount = ([mount for mount in mounts if + mount.type == MountType.CGROUP_V2 and + mount.path == cgroup.root_path + ] or [None])[-1] + + if mount: + mount_cgroups[mount] = cgroup + + for mount in mounts: + probe_paths[mount.path] = None + + if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path + probe_paths[cgroup.full_path] = None + + probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py')) + probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths] + probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, 
data=probe_script)[0]) + + for path in probe_paths: + probe_paths[path] = probe_results[str(path)] + + remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts} + results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {} + + for expected_mount in expected_mounts: + expected_path = pathlib.PurePosixPath(expected_mount.path) + + if not (actual_mount := remaining_mounts.pop(expected_path, None)): + results[expected_path] = (False, 'not mounted') + continue + + actual_mount_write_error = probe_paths[actual_mount.path] + actual_mount_errors = [] + + if cgroup := mount_cgroups.get(actual_mount): + if expected_mount.state == CGroupState.SHADOWED: + actual_mount_errors.append('unexpected cgroup association') + + if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST: + results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup') + + if cgroup.full_path == actual_mount.path: + if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE: + actual_mount_errors.append('unexpected mount') + else: + cgroup_write_error = probe_paths[cgroup.full_path] + cgroup_errors = [] + + if expected_mount.state == CGroupState.SHADOWED: + cgroup_errors.append('unexpected cgroup association') + + if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE: + cgroup_errors.append('unexpected cgroup') + + if cgroup_write_error: + cgroup_errors.append(cgroup_write_error) + + if cgroup_errors: + results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}') + else: + results[cgroup.full_path] = (True, 'directory (writable)') + elif expected_mount.state not in (None, CGroupState.SHADOWED): + actual_mount_errors.append('missing cgroup association') + + if actual_mount.type != expected_mount.type and expected_mount.type is not None: + actual_mount_errors.append(f'type not {expected_mount.type}') + + if bool(actual_mount_write_error) == expected_mount.writable: + actual_mount_errors.append(f'{actual_mount_write_error or "writable"}') + + if actual_mount_errors: + results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}') + else: + results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})') + + for remaining_mount in remaining_mounts.values(): + remaining_mount_write_error = probe_paths[remaining_mount.path] + + results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})') + + identity = get_identity(args, config, container_name) + messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())] + message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages) + + display.info(f'>>> Container: {identity}\n{message.rstrip()}') + + if args.dev_probe_cgroups: + write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message) + + +def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str): + """Generate and return an identity string to use when logging test results.""" + engine = require_docker().command + + try: + loginuid = int(read_text_file('/proc/self/loginuid')) + except FileNotFoundError: + loginuid = LOGINUID_NOT_SET + + user = pwd.getpwuid(os.getuid()).pw_name + login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name + remote 
= engine == 'podman' and get_podman_remote() + + tags = ( + config.name, + engine, + f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}', + f'remote={remote}', + f'user={user}', + f'loginuid={login_user}', + container_name, + ) + + return '|'.join(tags) + + +def is_relative_to(first: pathlib.PurePosixPath, second: t.Union[pathlib.PurePosixPath, str]) -> bool: + """Return True if path `first` is relative to path `second`, otherwise return False.""" + second_path = pathlib.PurePosixPath(second) + return second_path == first or second_path in first.parents diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py index da113f02a1e1c5..398145b49d5bc0 100644 --- a/test/lib/ansible_test/_internal/docker_util.py +++ b/test/lib/ansible_test/_internal/docker_util.py @@ -1,18 +1,17 @@ """Functions for accessing docker via the docker cli.""" from __future__ import annotations +import dataclasses +import enum import json import os -import random +import pathlib +import re import socket import time import urllib.parse import typing as t -from .io import ( - read_text_file, -) - from .util import ( ApplicationError, common_environment, @@ -20,6 +19,7 @@ find_executable, SubprocessError, cache, + OutputStream, ) from .util_common import ( @@ -29,7 +29,17 @@ from .config import ( CommonConfig, - EnvironmentConfig, +) + +from .thread import ( + mutex, + named_lock, +) + +from .cgroup import ( + CGroupEntry, + MountEntry, + MountType, ) DOCKER_COMMANDS = [ @@ -37,6 +47,379 @@ 'podman', ] +UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:2.0.0' + +# Max number of open files in a docker container. +# Passed with --ulimit option to the docker run command. +MAX_NUM_OPEN_FILES = 10240 + +# The value of /proc/*/loginuid when it is not set. +# It is a reserved UID, which is the maximum 32-bit unsigned integer value. +# See: https://access.redhat.com/solutions/25404 +LOGINUID_NOT_SET = 4294967295 + + +class DockerInfo: + """The results of `docker info` and `docker version` for the container runtime.""" + + @classmethod + def init(cls, args: CommonConfig) -> DockerInfo: + """Initialize and return a DockerInfo instance.""" + command = require_docker().command + + info_stdout = docker_command(args, ['info', '--format', '{{ json . }}'], capture=True, always=True)[0] + info = json.loads(info_stdout) + + if server_errors := info.get('ServerErrors'): + # This can occur when a remote docker instance is in use and the instance is not responding, such as when the system is still starting up. + # In that case an error such as the following may be returned: + # error during connect: Get "http://{hostname}:2375/v1.24/info": dial tcp {ip_address}:2375: connect: no route to host + raise ApplicationError('Unable to get container host information: ' + '\n'.join(server_errors)) + + version_stdout = docker_command(args, ['version', '--format', '{{ json . 
}}'], capture=True, always=True)[0] + version = json.loads(version_stdout) + + info = DockerInfo(args, command, info, version) + + return info + + def __init__(self, args: CommonConfig, engine: str, info: dict[str, t.Any], version: dict[str, t.Any]) -> None: + self.args = args + self.engine = engine + self.info = info + self.version = version + + @property + def client(self) -> dict[str, t.Any]: + """The client version details.""" + client = self.version.get('Client') + + if not client: + raise ApplicationError('Unable to get container host client information.') + + return client + + @property + def server(self) -> dict[str, t.Any]: + """The server version details.""" + server = self.version.get('Server') + + if not server: + if self.engine == 'podman': + # Some Podman versions always report server version info (verified with 1.8.0 and 1.9.3). + # Others do not unless Podman remote is being used. + # To provide consistency, use the client version if the server version isn't provided. + # See: https://github.com/containers/podman/issues/2671#issuecomment-804382934 + return self.client + + raise ApplicationError('Unable to get container host server information.') + + return server + + @property + def client_version(self) -> str: + """The client version.""" + return self.client['Version'] + + @property + def server_version(self) -> str: + """The server version.""" + return self.server['Version'] + + @property + def client_major_minor_version(self) -> tuple[int, int]: + """The client major and minor version.""" + major, minor = self.client_version.split('.')[:2] + return int(major), int(minor) + + @property + def server_major_minor_version(self) -> tuple[int, int]: + """The server major and minor version.""" + major, minor = self.server_version.split('.')[:2] + return int(major), int(minor) + + @property + def cgroupns_option_supported(self) -> bool: + """Return True if the `--cgroupns` option is supported, otherwise return False.""" + if self.engine == 'docker': + # Docker added support for the `--cgroupns` option in version 20.10. + # Both the client and server must support the option to use it. + # See: https://docs.docker.com/engine/release-notes/#20100 + return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10) + + raise NotImplementedError(self.engine) + + @property + def cgroup_version(self) -> int: + """The cgroup version of the container host.""" + info = self.info + host = info.get('host') + + # When the container host reports cgroup v1 it is running either cgroup v1 legacy mode or cgroup v2 hybrid mode. + # When the container host reports cgroup v2 it is running under cgroup v2 unified mode. + # See: https://github.com/containers/podman/blob/8356621249e36ed62fc7f35f12d17db9027ff076/libpod/info_linux.go#L52-L56 + # See: https://github.com/moby/moby/blob/d082bbcc0557ec667faca81b8b33bec380b75dac/daemon/info_unix.go#L24-L27 + + if host: + return int(host['cgroupVersion'].lstrip('v')) # podman + + try: + return int(info['CgroupVersion']) # docker + except KeyError: + pass + + # Docker 20.10 (API version 1.41) added support for cgroup v2. + # Unfortunately the client or server is too old to report the cgroup version. + # If the server is old, we can infer the cgroup version. + # Otherwise, we'll need to fall back to detection. 
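When neither endpoint reports a usable cgroup version, the mode can also be probed from the filesystem. The probe this PR actually uses inspects cgroup and mountinfo data from inside a utility container (see detect_host_properties below); a simpler host-side check that illustrates the same distinction, offered only as a sketch:

    import os

    def host_cgroup_version() -> int:
        """Return 2 on a cgroup v2 unified host, otherwise 1 (v1 legacy or v2 hybrid). Illustrative only."""
        # On a unified host, /sys/fs/cgroup is itself a cgroup2 mount and
        # exposes the cgroup.controllers file at its root.
        return 2 if os.path.exists('/sys/fs/cgroup/cgroup.controllers') else 1
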
+ # See: https://docs.docker.com/engine/release-notes/#20100 + # See: https://docs.docker.com/engine/api/version-history/#v141-api-changes + + if self.server_major_minor_version < (20, 10): + return 1 # old docker server with only cgroup v1 support + + # Tell the user what versions they have and recommend they upgrade the client. + # Downgrading the server should also work, but we won't mention that. + message = ( + f'The Docker client version is {self.client_version}. ' + f'The Docker server version is {self.server_version}. ' + 'Upgrade your Docker client to version 20.10 or later.' + ) + + if detect_host_properties(self.args).cgroup_v2: + # Unfortunately cgroup v2 was detected on the Docker server. + # A newer client is needed to support the `--cgroupns` option for use with cgroup v2. + raise ApplicationError(f'Unsupported Docker client and server combination using cgroup v2. {message}') + + display.warning(f'Detected Docker server cgroup v1 using probing. {message}', unique=True) + + return 1 # docker server is using cgroup v1 (or cgroup v2 hybrid) + + @property + def docker_desktop_wsl2(self) -> bool: + """Return True if Docker Desktop integrated with WSL2 is detected, otherwise False.""" + info = self.info + + kernel_version = info.get('KernelVersion') + operating_system = info.get('OperatingSystem') + + dd_wsl2 = kernel_version and kernel_version.endswith('-WSL2') and operating_system == 'Docker Desktop' + + return dd_wsl2 + + @property + def description(self) -> str: + """Describe the container runtime.""" + tags = dict( + client=self.client_version, + server=self.server_version, + cgroup=f'v{self.cgroup_version}', + ) + + labels = [self.engine] + [f'{key}={value}' for key, value in tags.items()] + + if self.docker_desktop_wsl2: + labels.append('DD+WSL2') + + return f'Container runtime: {" ".join(labels)}' + + +@mutex +def get_docker_info(args: CommonConfig) -> DockerInfo: + """Return info for the current container runtime. The results are cached.""" + try: + return get_docker_info.info # type: ignore[attr-defined] + except AttributeError: + pass + + info = DockerInfo.init(args) + + display.info(info.description, verbosity=1) + + get_docker_info.info = info # type: ignore[attr-defined] + + return info + + +class SystemdControlGroupV1Status(enum.Enum): + """The state of the cgroup v1 systemd hierarchy on the container host.""" + SUBSYSTEM_MISSING = 'The systemd cgroup subsystem was not found.' + FILESYSTEM_NOT_MOUNTED = 'The "/sys/fs/cgroup/systemd" filesystem is not mounted.' + MOUNT_TYPE_NOT_CORRECT = 'The "/sys/fs/cgroup/systemd" mount type is not correct.' + VALID = 'The "/sys/fs/cgroup/systemd" mount is valid.' + + +@dataclasses.dataclass(frozen=True) +class ContainerHostProperties: + """Container host properties detected at run time.""" + audit_code: str + max_open_files: int + loginuid: t.Optional[int] + cgroup_v1: SystemdControlGroupV1Status + cgroup_v2: bool + + +@mutex +def detect_host_properties(args: CommonConfig) -> ContainerHostProperties: + """ + Detect and return properties of the container host. + + The information collected is: + + - The errno result from attempting to query the container host's audit status. + - The max number of open files supported by the container host to run containers. + This value may be capped to the maximum value used by ansible-test. + If the value is below the desired limit, a warning is displayed. + - The loginuid used by the container host to run containers, or None if the audit subsystem is unavailable. 
+ - The cgroup subsystems registered with the Linux kernel. + - The mounts visible within a container. + - The status of the systemd cgroup v1 hierarchy. + + This information is collected together to reduce the number of container runs to probe the container host. + """ + try: + return detect_host_properties.properties # type: ignore[attr-defined] + except AttributeError: + pass + + single_line_commands = ( + 'audit-status', + 'cat /proc/sys/fs/nr_open', + 'ulimit -Hn', + '(cat /proc/1/loginuid; echo)', + ) + + multi_line_commands = ( + ' && '.join(single_line_commands), + 'cat /proc/1/cgroup', + 'cat /proc/1/mountinfo', + ) + + options = ['--volume', '/sys/fs/cgroup:/probe:ro'] + cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)] + + stdout = run_utility_container(args, f'ansible-test-probe-{args.session_name}', cmd, options)[0] + + if args.explain: + return ContainerHostProperties( + audit_code='???', + max_open_files=MAX_NUM_OPEN_FILES, + loginuid=LOGINUID_NOT_SET, + cgroup_v1=SystemdControlGroupV1Status.VALID, + cgroup_v2=False, + ) + + blocks = stdout.split('\n-\n') + + values = blocks[0].split('\n') + + audit_parts = values[0].split(' ', 1) + audit_status = int(audit_parts[0]) + audit_code = audit_parts[1] + + system_limit = int(values[1]) + hard_limit = int(values[2]) + loginuid = int(values[3]) if values[3] else None + + cgroups = CGroupEntry.loads(blocks[1]) + mounts = MountEntry.loads(blocks[2]) + + if hard_limit < MAX_NUM_OPEN_FILES and hard_limit < system_limit and require_docker().command == 'docker': + # Podman will use the highest possible limits, up to its default of 1M. + # See: https://github.com/containers/podman/blob/009afb50b308548eb129bc68e654db6c6ad82e7a/pkg/specgen/generate/oci.go#L39-L58 + # Docker limits are less predictable. They could be the system limit or the user's soft limit. + # If Docker is running as root it should be able to use the system limit. + # When Docker reports a limit below the preferred value and the system limit, attempt to use the preferred value, up to the system limit. + options = ['--ulimit', f'nofile={min(system_limit, MAX_NUM_OPEN_FILES)}'] + cmd = ['sh', '-c', 'ulimit -Hn'] + + try: + stdout = run_utility_container(args, f'ansible-test-ulimit-{args.session_name}', cmd, options)[0] + except SubprocessError as ex: + display.warning(str(ex)) + else: + hard_limit = int(stdout) + + # Check the audit error code from attempting to query the container host's audit status. + # + # The following error codes are known to occur: + # + # EPERM - Operation not permitted + # This occurs when the root user runs a container but lacks the AUDIT_WRITE capability. + # This will cause patched versions of OpenSSH to disconnect after a login succeeds. + # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch + # + # EBADF - Bad file number + # This occurs when the host doesn't support the audit system (the open_audit call fails). + # This allows SSH logins to succeed despite the failure. + # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/netlink.c#L204-L209 + # + # ECONNREFUSED - Connection refused + # This occurs when a non-root user runs a container without the AUDIT_WRITE capability. + # When sending an audit message, libaudit ignores this error condition. + # This allows SSH logins to succeed despite the failure. 
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/deprecated.c#L48-L52 + + subsystems = set(cgroup.subsystem for cgroup in cgroups) + mount_types = {mount.path: mount.type for mount in mounts} + + if 'systemd' not in subsystems: + cgroup_v1 = SystemdControlGroupV1Status.SUBSYSTEM_MISSING + elif not (mount_type := mount_types.get(pathlib.PurePosixPath('/probe/systemd'))): + cgroup_v1 = SystemdControlGroupV1Status.FILESYSTEM_NOT_MOUNTED + elif mount_type != MountType.CGROUP_V1: + cgroup_v1 = SystemdControlGroupV1Status.MOUNT_TYPE_NOT_CORRECT + else: + cgroup_v1 = SystemdControlGroupV1Status.VALID + + cgroup_v2 = mount_types.get(pathlib.PurePosixPath('/probe')) == MountType.CGROUP_V2 + + display.info(f'Container host audit status: {audit_code} ({audit_status})', verbosity=1) + display.info(f'Container host max open files: {hard_limit}', verbosity=1) + display.info(f'Container loginuid: {loginuid if loginuid is not None else "unavailable"}' + f'{" (not set)" if loginuid == LOGINUID_NOT_SET else ""}', verbosity=1) + + if hard_limit < MAX_NUM_OPEN_FILES: + display.warning(f'Unable to set container max open files to {MAX_NUM_OPEN_FILES}. Using container host limit of {hard_limit} instead.') + else: + hard_limit = MAX_NUM_OPEN_FILES + + properties = ContainerHostProperties( + # The errno (audit_status) is intentionally not exposed here, as it can vary across systems and architectures. + # Instead, the symbolic name (audit_code) is used, which is resolved inside the container which generated the error. + # See: https://man7.org/linux/man-pages/man3/errno.3.html + audit_code=audit_code, + max_open_files=hard_limit, + loginuid=loginuid, + cgroup_v1=cgroup_v1, + cgroup_v2=cgroup_v2, + ) + + detect_host_properties.properties = properties # type: ignore[attr-defined] + + return properties + + +def run_utility_container( + args: CommonConfig, + name: str, + cmd: list[str], + options: list[str], + data: t.Optional[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Run the specified command using the ansible-test utility container, returning stdout and stderr.""" + options = options + [ + '--name', name, + '--rm', + ] + + if data: + options.append('-i') + + docker_pull(args, UTILITY_IMAGE) + + return docker_run(args, UTILITY_IMAGE, options, cmd, data) + class DockerCommand: """Details about the available docker command.""" @@ -57,7 +440,7 @@ def detect(): # type: () -> t.Optional[DockerCommand] executable = find_executable(command, required=False) if executable: - version = raw_command([command, '-v'], capture=True)[0].strip() + version = raw_command([command, '-v'], env=docker_environment(), capture=True)[0].strip() if command == 'docker' and 'podman' in version: continue # avoid detecting podman as docker @@ -118,162 +501,215 @@ def get_docker_hostname(): # type: () -> str @cache -def get_docker_container_id(): # type: () -> t.Optional[str] - """Return the current container ID if running in a container, otherwise return None.""" - path = '/proc/self/cpuset' - container_id = None - - if os.path.exists(path): - # File content varies based on the environment: - # No Container: / - # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507 - # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891 - # Podman: /../../../../../.. 
- contents = read_text_file(path) +def get_podman_host_ip(): # type: () -> str + """Return the IP of the Podman host.""" + podman_host_ip = socket.gethostbyname(get_podman_hostname()) - cgroup_path, cgroup_name = os.path.split(contents.strip()) + display.info('Detected Podman host IP: %s' % podman_host_ip, verbosity=1) - if cgroup_path in ('/docker', '/azpl_job'): - container_id = cgroup_name + return podman_host_ip - if container_id: - display.info('Detected execution in Docker container: %s' % container_id, verbosity=1) - - return container_id +@cache +def get_podman_default_hostname(): # type: () -> t.Optional[str] + """Return the default hostname of the Podman service. -def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str - """ - Return the preferred network name for use with Docker. The selection logic is: - - the network selected by the user with `--docker-network` - - the network of the currently running docker container (if any) - - the default docker network (returns None) + --format was added in podman 3.3.0; this functionality depends on its availability """ + hostname = None try: - return get_docker_preferred_network_name.network - except AttributeError: - pass - - network = None + stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], env=docker_environment(), capture=True)[0] + except SubprocessError: + stdout = '[]' - if args.docker_network: - network = args.docker_network - else: - current_container_id = get_docker_container_id() + try: + connections = json.loads(stdout) + except json.decoder.JSONDecodeError: + return hostname - if current_container_id: - # Make sure any additional containers we launch use the same network as the current container we're running in. - # This is needed when ansible-test is running in a container that is not connected to Docker's default network. - container = docker_inspect(args, current_container_id, always=True) - network = container.get_network_name() + for connection in connections: + # A trailing '*' indicates the default + if connection['Name'][-1] == '*': + hostname = connection['URI'] + break - get_docker_preferred_network_name.network = network + return hostname - return network +@cache +def get_podman_remote(): # type: () -> t.Optional[str] + """Return the remote podman hostname, if any, otherwise return None.""" + # URL value resolution precedence: + # - command line value + # - environment variable CONTAINER_HOST + # - containers.conf + # - unix://run/podman/podman.sock + hostname = None + + podman_host = os.environ.get('CONTAINER_HOST') + if not podman_host: + podman_host = get_podman_default_hostname() + + if podman_host and podman_host.startswith('ssh://'): + try: + hostname = urllib.parse.urlparse(podman_host).hostname + except ValueError: + display.warning('Could not parse podman URI "%s"' % podman_host) + else: + display.info('Detected Podman remote: %s' % hostname, verbosity=1) + return hostname -def is_docker_user_defined_network(network): # type: (str) -> bool - """Return True if the network being used is a user-defined network.""" - return network and network != 'bridge' +@cache +def get_podman_hostname(): # type: () -> str + """Return the hostname of the Podman service.""" + hostname = get_podman_remote() -def docker_pull(args, image): # type: (EnvironmentConfig, str) -> None - """ - Pull the specified image if it is not available. - Images without a tag or digest will not be pulled. - Retries up to 10 times if the pull fails.
- """ - if '@' not in image and ':' not in image: - display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2) - return + if not hostname: + hostname = 'localhost' + display.info('Assuming Podman is available on localhost.', verbosity=1) - if docker_image_exists(args, image): - display.info('Skipping pull of existing image: %s' % image, verbosity=2) - return + return hostname - for _iteration in range(1, 10): - try: - docker_command(args, ['pull', image]) - return - except SubprocessError: - display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image) - time.sleep(3) - raise ApplicationError('Failed to pull docker image "%s".' % image) +@cache +def get_docker_container_id(): # type: () -> t.Optional[str] + """Return the current container ID if running in a container, otherwise return None.""" + mountinfo_path = pathlib.Path('/proc/self/mountinfo') + container_id = None + engine = None + + if mountinfo_path.is_file(): + # NOTE: This method of detecting the container engine and container ID relies on implementation details of each container engine. + # Although the implementation details have remained unchanged for some time, there is no guarantee they will continue to work. + # There have been proposals to create a standard mechanism for this, but none is currently available. + # See: https://github.com/opencontainers/runtime-spec/issues/1105 + + mounts = MountEntry.loads(mountinfo_path.read_text()) + + for mount in mounts: + if str(mount.path) == '/etc/hostname': + # Podman generates /etc/hostname in the makePlatformBindMounts function. + # That function ends up using ContainerRunDirectory to generate a path like: {prefix}/{container_id}/userdata/hostname + # NOTE: The {prefix} portion of the path can vary, so should not be relied upon. + # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/libpod/container_internal_linux.go#L660-L664 + # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/vendor/github.com/containers/storage/store.go#L3133 + # This behavior has existed for ~5 years and was present in Podman version 0.2. + # See: https://github.com/containers/podman/pull/248 + if match := re.search('/(?P[0-9a-f]{64})/userdata/hostname$', str(mount.root)): + container_id = match.group('id') + engine = 'Podman' + break + + # Docker generates /etc/hostname in the BuildHostnameFile function. + # That function ends up using the containerRoot function to generate a path like: {prefix}/{container_id}/hostname + # NOTE: The {prefix} portion of the path can vary, so should not be relied upon. + # See: https://github.com/moby/moby/blob/cd8a090e6755bee0bdd54ac8a894b15881787097/container/container_unix.go#L58 + # See: https://github.com/moby/moby/blob/92e954a2f05998dc05773b6c64bbe23b188cb3a0/daemon/container.go#L86 + # This behavior has existed for at least ~7 years and was present in Docker version 1.0.1. 
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/container.go#L351 + # See: https://github.com/moby/moby/blob/v1.0.1/daemon/daemon.go#L133 + if match := re.search('/(?P<id>[0-9a-f]{64})/hostname$', str(mount.root)): + container_id = match.group('id') + engine = 'Docker' + break + if container_id: + display.info(f'Detected execution in {engine} container ID: {container_id}', verbosity=1) -def docker_cp_to(args, container_id, src, dst): # type: (EnvironmentConfig, str, str, str) -> None - """Copy a file to the specified container.""" - docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)]) + return container_id -def docker_run( - args, # type: EnvironmentConfig - image, # type: str - options, # type: t.Optional[t.List[str]] - cmd=None, # type: t.Optional[t.List[str]] - create_only=False, # type: bool -): # type: (...) -> str - """Run a container using the given docker image.""" - if not options: - options = [] +def docker_pull(args, image): # type: (CommonConfig, str) -> None + """ + Pull the specified image if it is not available. + Images without a tag or digest will not be pulled. + Retries up to 10 times if the pull fails. + A warning will be shown for any image with volumes defined. + Images will be pulled only once. + Concurrent pulls for the same image will block until the first completes. + """ + with named_lock(f'docker_pull:{image}') as first: + if first: + __docker_pull(args, image) - if not cmd: - cmd = [] - if create_only: - command = 'create' +def __docker_pull(args: CommonConfig, image: str) -> None: + """Internal implementation for docker_pull. Do not call directly.""" + if '@' not in image and ':' not in image: + display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2) + inspect = docker_image_inspect(args, image) + elif inspect := docker_image_inspect(args, image, always=True): + display.info('Skipping pull of existing image: %s' % image, verbosity=2) else: - command = 'run' + for _iteration in range(1, 10): + try: + docker_command(args, ['pull', image], capture=False) - network = get_docker_preferred_network_name(args) + if (inspect := docker_image_inspect(args, image)) or args.explain: + break - if is_docker_user_defined_network(network): - # Only when the network is not the default bridge network. - options.extend(['--network', network]) + display.warning(f'Image "{image}" not found after pull completed. Waiting a few seconds before trying again.') + except SubprocessError: + display.warning(f'Failed to pull container image "{image}". Waiting a few seconds before trying again.') + time.sleep(3) + else: + raise ApplicationError(f'Failed to pull container image "{image}".') - for _iteration in range(1, 3): - try: - stdout = docker_command(args, [command] + options + [image] + cmd, capture=True)[0] + if inspect and inspect.volumes: + display.warning(f'Image "{image}" contains {len(inspect.volumes)} volume(s): {", ".join(sorted(inspect.volumes))}\n' + 'This may result in leaking anonymous volumes. It may also prevent the image from working on some hosts or container engines.\n' + 'The image should be rebuilt without the use of the VOLUME instruction.', + unique=True) - if args.explain: - return ''.join(random.choice('0123456789abcdef') for _iteration in range(64)) - return stdout.strip() - except SubprocessError as ex: - display.error(ex) - display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.'
% image) - time.sleep(3) +def docker_cp_to(args, container_id, src, dst): # type: (CommonConfig, str, str, str) -> None + """Copy a file to the specified container.""" + docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)], capture=True) - raise ApplicationError('Failed to run docker image "%s".' % image) +def docker_create( + args: CommonConfig, + image: str, + options: list[str], + cmd: list[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Create a container using the given docker image.""" + return docker_command(args, ['create'] + options + [image] + cmd, capture=True) -def docker_start(args, container_id, options=None): # type: (EnvironmentConfig, str, t.Optional[t.List[str]]) -> (t.Optional[str], t.Optional[str]) - """ - Start a docker container by name or ID - """ - if not options: - options = [] - for _iteration in range(1, 3): - try: - return docker_command(args, ['start'] + options + [container_id], capture=True) - except SubprocessError as ex: - display.error(ex) - display.warning('Failed to start docker container "%s". Waiting a few seconds before trying again.' % container_id) - time.sleep(3) +def docker_run( + args: CommonConfig, + image: str, + options: list[str], + cmd: list[str] = None, + data: t.Optional[str] = None, +) -> tuple[t.Optional[str], t.Optional[str]]: + """Run a container using the given docker image.""" + return docker_command(args, ['run'] + options + [image] + cmd, data=data, capture=True) + - raise ApplicationError('Failed to run docker container "%s".' % container_id) +def docker_start( + args: CommonConfig, + container_id: str, + options: list[str], +) -> tuple[t.Optional[str], t.Optional[str]]: + """Start a container by name or ID.""" + return docker_command(args, ['start'] + options + [container_id], capture=True) -def docker_rm(args, container_id): # type: (EnvironmentConfig, str) -> None +def docker_rm(args, container_id): # type: (CommonConfig, str) -> None """Remove the specified container.""" try: - docker_command(args, ['rm', '-f', container_id], capture=True) + # Stop the container with SIGKILL immediately, then remove the container. + # Podman supports the `--time` option on `rm`, but only since version 4.0.0. + # Docker does not support the `--time` option on `rm`. + docker_command(args, ['stop', '--time', '0', container_id], capture=True) + docker_command(args, ['rm', container_id], capture=True) except SubprocessError as ex: - if 'no such container' in ex.stderr: - pass # podman does not handle this gracefully, exits 1 - else: + # Both Podman and Docker report an error if the container does not exist. + # The error messages contain the same "no such container" string, differing only in capitalization. 
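+        # Illustrative messages only (assumed wording; the exact text varies by engine and version): + #   Docker: Error response from daemon: No such container: example + #   Podman: Error: no container with name or ID "example" found: no such container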
+ if 'no such container' not in ex.stderr.lower(): raise ex @@ -291,7 +727,7 @@ def __init__(self, identifier): class DockerInspect: """The results of `docker inspect` for a single container.""" - def __init__(self, args, inspection): # type: (EnvironmentConfig, t.Dict[str, t.Any]) -> None + def __init__(self, args, inspection): # type: (CommonConfig, t.Dict[str, t.Any]) -> None self.args = args self.inspection = inspection @@ -334,6 +770,14 @@ def running(self): # type: () -> bool """Return True if the container is running, otherwise False.""" return self.state['Running'] + @property + def pid(self) -> int: + """Return the PID of the init process.""" + if self.args.explain: + return 0 + + return self.state['Pid'] + @property def env(self): # type: () -> t.List[str] """Return a list of the environment variables used to create the container.""" @@ -373,33 +817,14 @@ def get_network_name(self): # type: () -> str return networks[0] - def get_ip_address(self): # type: () -> t.Optional[str] - """Return the IP address of the container for the preferred docker network.""" - if self.networks: - network_name = get_docker_preferred_network_name(self.args) - - if not network_name: - # Sort networks and use the first available. - # This assumes all containers will have access to the same networks. - network_name = sorted(self.networks.keys()).pop(0) - - ipaddress = self.networks[network_name]['IPAddress'] - else: - ipaddress = self.network_settings['IPAddress'] - - if not ipaddress: - return None - - return ipaddress - -def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, str, bool) -> DockerInspect +def docker_inspect(args, identifier, always=False): # type: (CommonConfig, str, bool) -> DockerInspect """ - Return the results of `docker inspect` for the specified container. + Return the results of `docker container inspect` for the specified container. Raises a ContainerNotFoundError if the container was not found. 
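+    The always flag is forwarded to docker_command, so passing always=True runs the inspection even in explain mode.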
""" try: - stdout = docker_command(args, ['inspect', identifier], capture=True, always=always)[0] + stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0] except SubprocessError as ex: stdout = ex.stdout @@ -414,29 +839,118 @@ def docker_inspect(args, identifier, always=False): # type: (EnvironmentConfig, raise ContainerNotFoundError(identifier) -def docker_network_disconnect(args, container_id, network): # type: (EnvironmentConfig, str, str) -> None +def docker_network_disconnect(args, container_id, network): # type: (CommonConfig, str, str) -> None """Disconnect the specified docker container from the given network.""" docker_command(args, ['network', 'disconnect', network, container_id], capture=True) -def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool - """Return True if the image exists, otherwise False.""" +class DockerImageInspect: + """The results of `docker image inspect` for a single image.""" + def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None: + self.args = args + self.inspection = inspection + + # primary properties + + @property + def config(self) -> dict[str, t.Any]: + """Return a dictionary of the image config.""" + return self.inspection['Config'] + + # nested properties + + @property + def volumes(self) -> dict[str, t.Any]: + """Return a dictionary of the image volumes.""" + return self.config.get('Volumes') or {} + + @property + def cmd(self) -> list[str]: + """The command to run when the container starts.""" + return self.config['Cmd'] + + +@mutex +def docker_image_inspect(args: CommonConfig, image: str, always: bool = False) -> t.Optional[DockerImageInspect]: + """ + Return the results of `docker image inspect` for the specified image or None if the image does not exist. + """ + inspect_cache: dict[str, DockerImageInspect] + + try: + inspect_cache = docker_image_inspect.cache # type: ignore[attr-defined] + except AttributeError: + inspect_cache = docker_image_inspect.cache = {} # type: ignore[attr-defined] + + if inspect_result := inspect_cache.get(image): + return inspect_result + try: - docker_command(args, ['image', 'inspect', image], capture=True) + stdout = docker_command(args, ['image', 'inspect', image], capture=True, always=always)[0] except SubprocessError: - return False + stdout = '[]' + + if args.explain and not always: + items = [] + else: + items = json.loads(stdout) - return True + if len(items) > 1: + raise ApplicationError(f'Inspection of image "{image}" resulted in {len(items)} items:\n{json.dumps(items, indent=4)}') + + if len(items) == 1: + inspect_result = DockerImageInspect(args, items[0]) + inspect_cache[image] = inspect_result + return inspect_result + + return None + + +class DockerNetworkInspect: + """The results of `docker network inspect` for a single network.""" + def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None: + self.args = args + self.inspection = inspection + + +def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]: + """ + Return the results of `docker network inspect` for the specified network or None if the network does not exist. 
+ """ + try: + stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0] + except SubprocessError: + stdout = '[]' + + if args.explain and not always: + items = [] + else: + items = json.loads(stdout) + + if len(items) == 1: + return DockerNetworkInspect(args, items[0]) + + return None + + +def docker_logs(args: CommonConfig, container_id: str) -> None: + """Display logs for the specified container. If an error occurs, it is displayed rather than raising an exception.""" + try: + docker_command(args, ['logs', container_id], capture=False) + except SubprocessError as ex: + display.error(str(ex)) def docker_exec( - args, # type: EnvironmentConfig + args, # type: CommonConfig container_id, # type: str cmd, # type: t.List[str] + capture, # type: bool options=None, # type: t.Optional[t.List[str]] - capture=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] data=None, # type: t.Optional[str] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Execute the given command in the specified container.""" @@ -446,38 +960,45 @@ def docker_exec( if data or stdin or stdout: options.append('-i') - return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, data=data) - - -def docker_info(args): # type: (CommonConfig) -> t.Dict[str, t.Any] - """Return a dictionary containing details from the `docker info` command.""" - stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True) - return json.loads(stdout) - - -def docker_version(args): # type: (CommonConfig) -> t.Dict[str, t.Any] - """Return a dictionary containing details from the `docker version` command.""" - stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True) - return json.loads(stdout) + return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, + output_stream=output_stream, data=data) def docker_command( args, # type: CommonConfig cmd, # type: t.List[str] - capture=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + capture, # type: bool + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] always=False, # type: bool data=None, # type: t.Optional[str] ): # type: (...) 
-> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified docker command.""" env = docker_environment() - command = require_docker().command - return run_command(args, [command] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always, data=data) + command = [require_docker().command] + + if command[0] == 'podman' and get_podman_remote(): + command.append('--remote') + + return run_command(args, command + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, always=always, + output_stream=output_stream, data=data) def docker_environment(): # type: () -> t.Dict[str, str] """Return a dictionary of docker related environment variables found in the current environment.""" env = common_environment() - env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_'))) + + var_names = { + 'XDG_RUNTIME_DIR', # podman + } + + var_prefixes = { + 'CONTAINER_', # podman remote + 'DOCKER_', # docker + } + + env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)}) + return env diff --git a/test/lib/ansible_test/_internal/host_configs.py b/test/lib/ansible_test/_internal/host_configs.py index 41fb7a89b92177..e69e706542cd66 100644 --- a/test/lib/ansible_test/_internal/host_configs.py +++ b/test/lib/ansible_test/_internal/host_configs.py @@ -18,6 +18,8 @@ ) from .completion import ( + AuditMode, + CGroupVersion, CompletionConfig, docker_completion, DockerCompletionConfig, @@ -39,6 +41,7 @@ get_available_python_versions, str_to_version, version_to_str, + Architecture, ) @@ -183,8 +186,10 @@ def have_root(self): # type: () -> bool def get_defaults(self, context): # type: (HostContext) -> PosixCompletionConfig """Return the default settings.""" - def apply_defaults(self, context, defaults): # type: (HostContext, PosixCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, PosixCompletionConfig) + super().apply_defaults(context, defaults) self.python = self.python or NativePythonConfig() @@ -204,25 +209,29 @@ class RemoteConfig(HostConfig, metaclass=abc.ABCMeta): """Base class for remote host configuration.""" name: t.Optional[str] = None provider: t.Optional[str] = None + arch: t.Optional[str] = None @property - def platform(self): + def platform(self): # type: () -> str """The name of the platform.""" return self.name.partition('/')[0] @property - def version(self): + def version(self): # type: () -> str """The version of the platform.""" return self.name.partition('/')[2] - def apply_defaults(self, context, defaults): # type: (HostContext, RemoteCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, RemoteCompletionConfig) + super().apply_defaults(context, defaults) if self.provider == 'default': self.provider = None self.provider = self.provider or defaults.provider or 'aws' + self.arch = self.arch or defaults.arch or Architecture.X86_64 @property def is_managed(self): # type: () -> bool @@ -262,8 +271,9 @@ def get_defaults(self, context): # type: (HostContext) -> InventoryCompletionCo """Return the default settings.""" return InventoryCompletionConfig() - def apply_defaults(self, context, defaults): # type: (HostContext, InventoryCompletionConfig) -> None + def apply_defaults(self, context, 
defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, InventoryCompletionConfig) @dataclasses.dataclass @@ -274,6 +284,8 @@ class DockerConfig(ControllerHostConfig, PosixConfig): memory: t.Optional[int] = None privileged: t.Optional[bool] = None seccomp: t.Optional[str] = None + cgroup: t.Optional[CGroupVersion] = None + audit: t.Optional[AuditMode] = None def get_defaults(self, context): # type: (HostContext) -> DockerCompletionConfig """Return the default settings.""" @@ -293,8 +305,10 @@ def get_default_targets(self, context): # type: (HostContext) -> t.List[Control return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()] - def apply_defaults(self, context, defaults): # type: (HostContext, DockerCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, DockerCompletionConfig) + super().apply_defaults(context, defaults) self.name = defaults.name @@ -303,6 +317,12 @@ def apply_defaults(self, context, defaults): # type: (HostContext, DockerComple if self.seccomp is None: self.seccomp = defaults.seccomp + if self.cgroup is None: + self.cgroup = defaults.cgroup_enum + + if self.audit is None: + self.audit = defaults.audit_enum + if self.privileged is None: self.privileged = False @@ -323,7 +343,7 @@ def have_root(self): # type: () -> bool @dataclasses.dataclass class PosixRemoteConfig(RemoteConfig, ControllerHostConfig, PosixConfig): """Configuration for a POSIX remote host.""" - arch: t.Optional[str] = None + become: t.Optional[str] = None def get_defaults(self, context): # type: (HostContext) -> PosixRemoteCompletionConfig """Return the default settings.""" @@ -342,6 +362,14 @@ def get_default_targets(self, context): # type: (HostContext) -> t.List[Control return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()] + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None + """Apply default settings.""" + assert isinstance(defaults, PosixRemoteCompletionConfig) + + super().apply_defaults(context, defaults) + + self.become = self.become or defaults.become + @property def have_root(self): # type: () -> bool """True if root is available, otherwise False.""" @@ -358,9 +386,7 @@ class WindowsRemoteConfig(RemoteConfig, WindowsConfig): """Configuration for a remote Windows host.""" def get_defaults(self, context): # type: (HostContext) -> WindowsRemoteCompletionConfig """Return the default settings.""" - return filter_completion(windows_completion()).get(self.name) or WindowsRemoteCompletionConfig( - name=self.name, - ) + return filter_completion(windows_completion()).get(self.name) or windows_completion().get(self.platform) @dataclasses.dataclass @@ -383,10 +409,13 @@ def get_defaults(self, context): # type: (HostContext) -> NetworkRemoteCompleti """Return the default settings.""" return filter_completion(network_completion()).get(self.name) or NetworkRemoteCompletionConfig( name=self.name, + placeholder=True, ) - def apply_defaults(self, context, defaults): # type: (HostContext, NetworkRemoteCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, NetworkRemoteCompletionConfig) + super().apply_defaults(context, defaults) self.collection
= self.collection or defaults.collection @@ -424,8 +453,10 @@ def get_defaults(self, context): # type: (HostContext) -> PosixCompletionConfig """Return the default settings.""" return context.controller_config.get_defaults(context) - def apply_defaults(self, context, defaults): # type: (HostContext, PosixCompletionConfig) -> None + def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None """Apply default settings.""" + assert isinstance(defaults, PosixCompletionConfig) + self.controller = context.controller_config if not self.python and not defaults.supported_pythons: @@ -449,7 +480,7 @@ def have_root(self): # type: () -> bool class FallbackReason(enum.Enum): - """Reason fallback was peformed.""" + """Reason fallback was performed.""" ENVIRONMENT = enum.auto() PYTHON = enum.auto() diff --git a/test/lib/ansible_test/_internal/host_profiles.py b/test/lib/ansible_test/_internal/host_profiles.py index e3aeeeebbc8807..7ff919367cdaa3 100644 --- a/test/lib/ansible_test/_internal/host_profiles.py +++ b/test/lib/ansible_test/_internal/host_profiles.py @@ -4,11 +4,13 @@ import abc import dataclasses import os +import shlex import tempfile import time import typing as t from .io import ( + read_text_file, write_text_file, ) @@ -40,6 +42,7 @@ from .core_ci import ( AnsibleCoreCI, SshKey, + VmResource, ) from .util import ( @@ -50,16 +53,30 @@ get_type_map, sanitize_host_name, sorted_versions, + InternalError, + HostConnectionError, + ANSIBLE_TEST_TARGET_ROOT, ) from .util_common import ( + get_docs_url, intercept_python, ) from .docker_util import ( docker_exec, + docker_image_inspect, + docker_logs, + docker_pull, docker_rm, get_docker_hostname, + require_docker, + get_docker_info, + detect_host_properties, + run_utility_container, + SystemdControlGroupV1Status, + LOGINUID_NOT_SET, + UTILITY_IMAGE, ) from .bootstrap import ( @@ -96,24 +113,79 @@ ) from .become import ( - Su, + Become, + SUPPORTED_BECOME_METHODS, Sudo, ) +from .completion import ( + AuditMode, + CGroupVersion, +) + +from .dev.container_probe import ( + CGroupMount, + CGroupPath, + CGroupState, + MountType, + check_container_cgroup_status, +) + TControllerHostConfig = t.TypeVar('TControllerHostConfig', bound=ControllerHostConfig) THostConfig = t.TypeVar('THostConfig', bound=HostConfig) TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig) TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig) +class ControlGroupError(ApplicationError): + """Raised when the container host does not have the necessary cgroup support to run a container.""" + def __init__(self, args: CommonConfig, reason: str) -> None: + engine = require_docker().command + dd_wsl2 = get_docker_info(args).docker_desktop_wsl2 + + message = f''' +{reason} + +Run the following commands as root on the container host to resolve this issue: + + mkdir /sys/fs/cgroup/systemd + mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + chown -R {{user}}:{{group}} /sys/fs/cgroup/systemd # only when rootless + +NOTE: These changes must be applied each time the container host is rebooted. +'''.strip() + + podman_message = ''' + If rootless Podman is already running [1], you may need to stop it before + containers are able to use the new mount point. + +[1] Check for 'podman' and 'catatonit' processes. +''' + + dd_wsl_message = f''' + When using Docker Desktop with WSL2, additional configuration [1] is required. 
+ +[1] {get_docs_url("https://docs.ansible.com/ansible-core/devel/dev_guide/testing_running_locally.html#docker-desktop-with-wsl2")} +''' + + if engine == 'podman': + message += podman_message + elif dd_wsl2: + message += dd_wsl_message + + message = message.strip() + + super().__init__(message) + + @dataclasses.dataclass(frozen=True) class Inventory: """Simple representation of an Ansible inventory.""" - host_groups: t.Dict[str, t.Dict[str, t.Dict[str, str]]] + host_groups: t.Dict[str, t.Dict[str, t.Dict[str, t.Union[str, int]]]] extra_groups: t.Optional[t.Dict[str, t.List[str]]] = None @staticmethod - def create_single_host(name, variables): # type: (str, t.Dict[str, str]) -> Inventory + def create_single_host(name, variables): # type: (str, t.Dict[str, t.Union[str, int]]) -> Inventory """Return an inventory instance created from the given hostname and variables.""" return Inventory(host_groups=dict(all={name: variables})) @@ -147,7 +219,7 @@ def write(self, args, path): # type: (CommonConfig, str) -> None inventory_text = inventory_text.strip() if not args.explain: - write_text_file(path, inventory_text) + write_text_file(path, inventory_text + '\n') display.info(f'>>> Inventory\n{inventory_text}', verbosity=3) @@ -176,6 +248,9 @@ def provision(self): # type: () -> None def setup(self): # type: () -> None """Perform out-of-band setup before delegation.""" + def on_target_failure(self) -> None: + """Executed during failure handling if this profile is a target.""" + def deprovision(self): # type: () -> None """Deprovision the host after delegation has completed.""" @@ -294,12 +369,18 @@ def wait_for_instance(self): # type: () -> AnsibleCoreCI def create_core_ci(self, load): # type: (bool) -> AnsibleCoreCI """Create and return an AnsibleCoreCI instance.""" + if not self.config.arch: + raise InternalError(f'No arch specified for config: {self.config}') + return AnsibleCoreCI( args=self.args, - platform=self.config.platform, - version=self.config.version, - provider=self.config.provider, - suffix='controller' if self.controller else 'target', + resource=VmResource( + platform=self.config.platform, + version=self.config.version, + architecture=self.config.arch, + provider=self.config.provider, + tag='controller' if self.controller else 'target', + ), load=load, ) @@ -322,6 +403,17 @@ def get_controller_target_connections(self): # type: () -> t.List[SshConnection class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[DockerConfig]): """Host profile for a docker instance.""" + + MARKER = 'ansible-test-marker' + + @dataclasses.dataclass(frozen=True) + class InitConfig: + """Configuration details required to run the container init.""" + options: list[str] + command: str + command_privileged: bool + expected_mounts: tuple[CGroupMount, ...] 
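+    # A rough illustration (not part of the change itself): on a cgroup v2 unified host, get_docker_init_config below would produce an InitConfig along these lines: + #   InitConfig( + #       options=[..., '--cgroupns', 'private'], + #       command='mount -o remount,rw /sys/fs/cgroup/', + #       command_privileged=True, + #       expected_mounts=(CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),), + #   )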
+ @property def container_name(self): # type: () -> t.Optional[str] """Return the stored container name, if any, otherwise None.""" @@ -332,24 +424,519 @@ def container_name(self, value): # type: (str) -> None """Store the given container name.""" self.state['container_name'] = value + @property + def cgroup_path(self) -> t.Optional[str]: + """Return the path to the cgroup v1 systemd hierarchy, if any, otherwise None.""" + return self.state.get('cgroup_path') + + @cgroup_path.setter + def cgroup_path(self, value: str) -> None: + """Store the path to the cgroup v1 systemd hierarchy.""" + self.state['cgroup_path'] = value + + @property + def label(self) -> str: + """Label to apply to resources related to this profile.""" + return f'{"controller" if self.controller else "target"}-{self.args.session_name}' + def provision(self): # type: () -> None """Provision the host before delegation.""" + init_probe = self.args.dev_probe_cgroups is not None + init_config = self.get_init_config() + container = run_support_container( args=self.args, context='__test_hosts__', image=self.config.image, - name=f'ansible-test-{"controller" if self.controller else "target"}-{self.args.session_name}', + name=f'ansible-test-{self.label}', ports=[22], publish_ports=not self.controller, # connections to the controller over SSH are not required - options=self.get_docker_run_options(), + options=init_config.options, cleanup=CleanupMode.NO, + cmd=self.build_init_command(init_config, init_probe), ) if not container: + if self.args.prime_containers: + if init_config.command_privileged or init_probe: + docker_pull(self.args, UTILITY_IMAGE) + return self.container_name = container.name + try: + options = ['--pid', 'host', '--privileged'] + + if init_config.command and init_config.command_privileged: + init_command = init_config.command + + if not init_probe: + init_command += f' && {shlex.join(self.wake_command)}' + + cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p', 'sh', '-c', init_command] + run_utility_container(self.args, f'ansible-test-init-{self.label}', cmd, options) + + if init_probe: + check_container_cgroup_status(self.args, self.config, self.container_name, init_config.expected_mounts) + + cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p'] + self.wake_command + run_utility_container(self.args, f'ansible-test-wake-{self.label}', cmd, options) + except SubprocessError: + display.info(f'Checking container "{self.container_name}" logs...') + docker_logs(self.args, self.container_name) + + raise + + def get_init_config(self) -> InitConfig: + """Return init config for running under the current container engine.""" + self.check_cgroup_requirements() + + engine = require_docker().command + init_config = getattr(self, f'get_{engine}_init_config')() + + return init_config + + def get_podman_init_config(self) -> InitConfig: + """Return init config for running under Podman.""" + options = self.get_common_run_options() + command: t.Optional[str] = None + command_privileged = False + expected_mounts: tuple[CGroupMount, ...] + + cgroup_version = get_docker_info(self.args).cgroup_version + + # Without AUDIT_WRITE the following errors may appear in the system logs of a container after attempting to log in using SSH: + # + # fatal: linux_audit_write_entry failed: Operation not permitted + # + # This occurs when running containers as root when the container host provides audit support, but the user lacks the AUDIT_WRITE capability. 
+ # The AUDIT_WRITE capability is provided by docker by default, but not podman. + # See: https://github.com/moby/moby/pull/7179 + # + # OpenSSH Portable requires AUDIT_WRITE when logging in with a TTY if the Linux audit feature was compiled in. + # Containers with the feature enabled will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system. + # See: https://github.com/openssh/openssh-portable/blob/2dc328023f60212cd29504fc05d849133ae47355/audit-linux.c#L90 + # See: https://github.com/openssh/openssh-portable/blob/715c892f0a5295b391ae92c26ef4d6a86ea96e8e/loginrec.c#L476-L478 + # + # Some containers will be running a patched version of OpenSSH which blocks logins when EPERM is received while using the audit system. + # These containers will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system. + # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch + # + # Since only some containers carry the patch or enable the Linux audit feature in OpenSSH, this capability is enabled on a per-container basis. + # No warning is provided when adding this capability, since there's not really anything the user can do about it. + if self.config.audit == AuditMode.REQUIRED and detect_host_properties(self.args).audit_code == 'EPERM': + options.extend(('--cap-add', 'AUDIT_WRITE')) + + # Without AUDIT_CONTROL the following errors may appear in the system logs of a container after attempting to log in using SSH: + # + # pam_loginuid(sshd:session): Error writing /proc/self/loginuid: Operation not permitted + # pam_loginuid(sshd:session): set_loginuid failed + # + # Containers configured to use the pam_loginuid module will encounter this error. If the module is required, logins will fail. + # Since most containers will have this configuration, the code to handle this issue is applied to all containers. + # + # This occurs when the loginuid is set on the container host and doesn't match the user on the container host which is running the container. + # Container hosts which do not use systemd are likely to leave the loginuid unset and thus be unaffected. + # The most common source of a mismatch is the use of sudo to run ansible-test, which changes the uid but cannot change the loginuid. + # This condition typically occurs only under podman, since the loginuid is inherited from the current user. + # See: https://github.com/containers/podman/issues/13012#issuecomment-1034049725 + # + # This condition is detected by querying the loginuid of a container running on the container host. + # When it occurs, a warning is displayed and the AUDIT_CONTROL capability is added to containers to work around the issue. + # The warning serves as notice to the user that their usage of ansible-test is responsible for the additional capability requirement. + if (loginuid := detect_host_properties(self.args).loginuid) not in (0, LOGINUID_NOT_SET, None): + display.warning(f'Running containers with capability AUDIT_CONTROL since the container loginuid ({loginuid}) is incorrect. ' + 'This is most likely due to use of sudo to run ansible-test when loginuid is already set.', unique=True) + + options.extend(('--cap-add', 'AUDIT_CONTROL')) + + if self.config.cgroup == CGroupVersion.NONE: + # Containers which do not require cgroup do not use systemd. + + options.extend(( + # Disabling systemd support in Podman will allow these containers to work on hosts without systemd. 
+ # Without this, running a container on a host without systemd results in errors such as (from crun): + # Error: crun: error stat'ing file `/sys/fs/cgroup/systemd`: No such file or directory: + # A similar error occurs when using runc: + # OCI runtime attempted to invoke a command that was not found + '--systemd', 'false', + # A private cgroup namespace limits what is visible in /proc/*/cgroup. + '--cgroupns', 'private', + # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Podman. + # This helps provide a consistent container environment across various container host configurations. + '--tmpfs', '/sys/fs/cgroup', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1: + # Podman hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-write in the container. + # They will also create a dedicated cgroup v1 systemd hierarchy for the container. + # On hosts with systemd this path is: /sys/fs/cgroup/systemd/libpod_parent/libpod-{container_id}/ + # On hosts without systemd this path is: /sys/fs/cgroup/systemd/{container_id}/ + + options.extend(( + # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics). + '--systemd', 'always', + # The host namespace must be used to permit the container to access the cgroup v1 systemd hierarchy created by Podman. + '--cgroupns', 'host', + # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container. + # Podman will provide a cgroup v1 systemd hierarchy on top of this. + '--tmpfs', '/sys/fs/cgroup', + )) + + self.check_systemd_cgroup_v1(options) # podman + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + # The mount point can be writable or not. + # The reason for the variation is not known. + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=None, state=CGroupState.HOST), + # The filesystem type can be tmpfs or devtmpfs. + # The reason for the variation is not known. + CGroupMount(path=CGroupPath.SYSTEMD_RELEASE_AGENT, type=None, writable=False, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2: + # Podman hosts providing cgroup v2 will give each container a read-write cgroup mount. + + options.extend(( + # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics). + '--systemd', 'always', + # A private cgroup namespace is used to avoid exposing the host cgroup to the container. + '--cgroupns', 'private', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + ) + elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2: + # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version. + # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create. + cgroup_path = self.create_systemd_cgroup_v1() # podman + command = f'echo 1 > {cgroup_path}/cgroup.procs' + + options.extend(( + # Force Podman to enable systemd support since a command is being provided. + '--systemd', 'always', + # A private cgroup namespace is required.
Using the host cgroup namespace results in errors such as the following (from crun): + # Error: OCI runtime error: mount `/sys/fs/cgroup` to '/sys/fs/cgroup': Invalid argument + # A similar error occurs when using runc: + # Error: OCI runtime error: runc create failed: unable to start container process: error during container init: + # error mounting "/sys/fs/cgroup" to rootfs at "/sys/fs/cgroup": mount /sys/fs/cgroup:/sys/fs/cgroup (via /proc/self/fd/7), flags: 0x1000: + # invalid argument + '--cgroupns', 'private', + # Unlike Docker, Podman ignores a /sys/fs/cgroup tmpfs mount, instead exposing a cgroup v2 mount. + # The exposed volume will be read-write, but the container will have its own private namespace. + # Provide a read-only cgroup v1 systemd hierarchy under which the dedicated ansible-test cgroup will be mounted read-write. + # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy. + # Podman doesn't support using a tmpfs for this. Attempting to do so results in an error (from crun): + # Error: OCI runtime error: read: Invalid argument + # A similar error occurs when using runc: + # Error: OCI runtime error: runc create failed: unable to start container process: error during container init: + # error mounting "tmpfs" to rootfs at "/sys/fs/cgroup/systemd": tmpcopyup: failed to copy /sys/fs/cgroup/systemd to /proc/self/fd/7 + # (/tmp/runctop3876247619/runctmpdir1460907418): read /proc/self/fd/7/cgroup.kill: invalid argument + '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:ro', + # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test. + '--volume', f'{cgroup_path}:{cgroup_path}:rw', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=False, state=CGroupState.SHADOWED), + CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + else: + raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.') + + return self.InitConfig( + options=options, + command=command, + command_privileged=command_privileged, + expected_mounts=expected_mounts, + ) + + def get_docker_init_config(self) -> InitConfig: + """Return init config for running under Docker.""" + options = self.get_common_run_options() + command: t.Optional[str] = None + command_privileged = False + expected_mounts: tuple[CGroupMount, ...] + + cgroup_version = get_docker_info(self.args).cgroup_version + + if self.config.cgroup == CGroupVersion.NONE: + # Containers which do not require cgroup do not use systemd. + + if get_docker_info(self.args).cgroupns_option_supported: + # Use the `--cgroupns` option if it is supported. + # Older servers which do not support the option use the host cgroup namespace. + # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only). + # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517 + # If the host cgroup namespace is used, cgroup information will be visible, but the cgroup mounts will be unavailable due to the tmpfs below. + options.extend(( + # A private cgroup namespace limits what is visible in /proc/*/cgroup. + '--cgroupns', 'private', + )) + + options.extend(( + # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Docker.
+ # This helps provide a consistent container environment across various container host configurations. + '--tmpfs', '/sys/fs/cgroup', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1: + # Docker hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-only in the container. + # They will also create a dedicated cgroup v1 systemd hierarchy for the container. + # The cgroup v1 systemd hierarchy path is: /sys/fs/cgroup/systemd/{container_id}/ + + if get_docker_info(self.args).cgroupns_option_supported: + # Use the `--cgroupns` option if it is supported. + # Older servers which do not support the option use the host cgroup namespace. + # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only). + # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517 + options.extend(( + # The host cgroup namespace must be used. + # Otherwise, /proc/1/cgroup will report "/" for the cgroup path, which is incorrect. + # See: https://github.com/systemd/systemd/issues/19245#issuecomment-815954506 + # It is set here to avoid relying on the current Docker configuration. + '--cgroupns', 'host', + )) + + options.extend(( + # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container. + '--tmpfs', '/sys/fs/cgroup', + # A cgroup v1 systemd hierarchy needs to be mounted read-write over the read-only one provided by Docker. + # Alternatives were tested, but were unusable due to various issues: + # - Attempting to remount the existing mount point read-write will result in a "mount point is busy" error. + # - Adding the entire "/sys/fs/cgroup" mount will expose hierarchies other than systemd. + # If the host is a cgroup v2 hybrid host it would also expose the /sys/fs/cgroup/unified/ hierarchy read-write. + # On older systems, such as an Ubuntu 18.04 host, a dedicated v2 cgroup would not be used, exposing the host cgroups to the container. + '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', + )) + + self.check_systemd_cgroup_v1(options) # docker + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2: + # Docker hosts providing cgroup v2 will give each container a read-only cgroup mount. + # It must be remounted read-write before systemd starts. + # This must be done in a privileged container, otherwise a "permission denied" error can occur. + command = 'mount -o remount,rw /sys/fs/cgroup/' + command_privileged = True + + options.extend(( + # A private cgroup namespace is used to avoid exposing the host cgroup to the container. + # This matches the behavior in Podman 1.7.0 and later, which select cgroupns 'host' mode for cgroup v1 and 'private' mode for cgroup v2.
+ # See: https://github.com/containers/podman/pull/4374 + # See: https://github.com/containers/podman/blob/main/RELEASE_NOTES.md#170 + '--cgroupns', 'private', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE), + ) + elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2: + # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version. + # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create. + cgroup_path = self.create_systemd_cgroup_v1() # docker + command = f'echo 1 > {cgroup_path}/cgroup.procs' + + options.extend(( + # A private cgroup namespace is used since no access to the host cgroup namespace is required. + # This matches the configuration used for running cgroup v1 containers under Podman. + '--cgroupns', 'private', + # Provide a read-write tmpfs filesystem to support additional cgroup mount points. + # Without this Docker will provide a read-only cgroup2 mount instead. + '--tmpfs', '/sys/fs/cgroup', + # Provide a read-write tmpfs filesystem to simulate a systemd cgroup v1 hierarchy. + # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy. + '--tmpfs', '/sys/fs/cgroup/systemd', + # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test. + '--volume', f'{cgroup_path}:{cgroup_path}:rw', + )) + + expected_mounts = ( + CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.TMPFS, writable=True, state=None), + CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST), + ) + else: + raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.') + + return self.InitConfig( + options=options, + command=command, + command_privileged=command_privileged, + expected_mounts=expected_mounts, + ) + + def build_init_command(self, init_config: InitConfig, sleep: bool) -> t.Optional[list[str]]: + """ + Build and return the command to start in the container. + Returns None if the default command for the container should be used. + + The sleep duration below was selected to: + + - Allow enough time to perform necessary operations in the container before waking it. + - Make the delay obvious if the wake command doesn't run or succeed. + - Avoid hanging indefinitely or for an unreasonably long time. + + NOTE: The container must have a POSIX-compliant default shell "sh" with a non-builtin "sleep" command. + """ + command = '' + + if init_config.command and not init_config.command_privileged: + command += f'{init_config.command} && ' + + if sleep or init_config.command_privileged: + command += 'sleep 60 ; ' + + if not command: + return None + + docker_pull(self.args, self.config.image) + inspect = docker_image_inspect(self.args, self.config.image) + + command += f'exec {shlex.join(inspect.cmd)}' + + return ['sh', '-c', command] + + @property + def wake_command(self) -> list[str]: + """ + The command used to wake the container from sleep. + This will be run inside our utility container, so the command used does not need to be present in the container being woken up. 
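To make the sleep/wake protocol concrete, here is a hypothetical illustration (not part of the patch) assuming an image whose default command is `/sbin/init`; the real command comes from `docker_image_inspect`, and the cgroup path is invented:

    # Init command generated by build_init_command for a non-privileged command with sleep=True:
    ['sh', '-c', 'echo 1 > /sys/fs/cgroup/systemd/ansible-test-abc123/cgroup.procs && sleep 60 ; exec /sbin/init']

    # For a privileged command only the sleep is included; the privileged command runs separately
    # (via a privileged exec) while the container sleeps, after which the sleep is killed:
    ['sh', '-c', 'sleep 60 ; exec /sbin/init']

Note the `;` (rather than `&&`) after the sleep: it ensures the `exec` of the default command still runs even though the killed sleep exits with a non-zero status.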
+ """ + return ['pkill', 'sleep'] + + def check_systemd_cgroup_v1(self, options: list[str]) -> None: + """Check the cgroup v1 systemd hierarchy to verify it is writeable for our container.""" + probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh')) + .replace('@MARKER@', self.MARKER) + .replace('@LABEL@', self.label)) + + cmd = ['sh'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-check-{self.label}', cmd, options, data=probe_script) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n' + f'Reason: {error}') from ex # cgroup probe failed + + raise + + def create_systemd_cgroup_v1(self) -> str: + """Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path.""" + self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}' + + # Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0. + # The mkdir command will fail with "Permission denied" otherwise. + options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged'] + cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + raise ControlGroupError(self.args, f'Unable to create a v1 cgroup within the systemd hierarchy.\n' + f'Reason: {error}') from ex # cgroup create permission denied + + raise + + return self.cgroup_path + + @property + def delete_systemd_cgroup_v1_command(self) -> list[str]: + """The command used to remove the previously created ansible-test cgroup in the v1 systemd hierarchy.""" + return ['find', self.cgroup_path, '-type', 'd', '-delete'] + + def delete_systemd_cgroup_v1(self) -> None: + """Delete a previously created ansible-test cgroup in the v1 systemd hierarchy.""" + # Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0. + # The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0. + options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged'] + cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}'] + + try: + run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options) + except SubprocessError as ex: + if error := self.extract_error(ex.stderr): + if error.endswith(': No such file or directory'): + return + + display.error(str(ex)) + + def extract_error(self, value: str) -> t.Optional[str]: + """ + Extract the ansible-test portion of the error message from the given value and return it. + Returns None if no ansible-test marker was found. 
+ """ + lines = value.strip().splitlines() + + try: + idx = lines.index(self.MARKER) + except ValueError: + return None + + lines = lines[idx + 1:] + message = '\n'.join(lines) + + return message + + def check_cgroup_requirements(self): + """Check cgroup requirements for the container.""" + cgroup_version = get_docker_info(self.args).cgroup_version + + if cgroup_version not in (1, 2): + raise ApplicationError(f'The container host provides cgroup v{cgroup_version}, but only version v1 and v2 are supported.') + + # Stop early for containers which require cgroup v2 when the container host does not provide it. + # None of the containers included with ansible-test currently use this configuration. + # Support for v2-only was added in preparation for the eventual removal of cgroup v1 support from systemd after EOY 2023. + # See: https://github.com/systemd/systemd/pull/24086 + if self.config.cgroup == CGroupVersion.V2_ONLY and cgroup_version != 2: + raise ApplicationError(f'Container {self.config.name} requires cgroup v2 but the container host provides cgroup v{cgroup_version}.') + + # Containers which use old versions of systemd (earlier than version 226) require cgroup v1 support. + # If the host is a cgroup v2 (unified) host, changes must be made to how the container is run. + # + # See: https://github.com/systemd/systemd/blob/main/NEWS + # Under the "CHANGES WITH 226" section: + # > systemd now optionally supports the new Linux kernel "unified" control group hierarchy. + # + # NOTE: The container host must have the cgroup v1 mount already present. + # If the container is run rootless, the user it runs under must have permissions to the mount. + # + # The following commands can be used to make the mount available: + # + # mkdir /sys/fs/cgroup/systemd + # mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr + # chown -R {user}:{group} /sys/fs/cgroup/systemd # only when rootless + # + # See: https://github.com/containers/crun/blob/main/crun.1.md#runocisystemdforce_cgroup_v1path + if self.config.cgroup == CGroupVersion.V1_ONLY or (self.config.cgroup != CGroupVersion.NONE and get_docker_info(self.args).cgroup_version == 1): + if (cgroup_v1 := detect_host_properties(self.args).cgroup_v1) != SystemdControlGroupV1Status.VALID: + if self.config.cgroup == CGroupVersion.V1_ONLY: + if get_docker_info(self.args).cgroup_version == 2: + reason = f'Container {self.config.name} requires cgroup v1, but the container host only provides cgroup v2.' + else: + reason = f'Container {self.config.name} requires cgroup v1, but the container host does not appear to be running systemd.' + else: + reason = 'The container host provides cgroup v1, but does not appear to be running systemd.' 
+ + reason += f'\n{cgroup_v1.value}' + + raise ControlGroupError(self.args, reason) # cgroup probe reported invalid state + def setup(self): # type: () -> None """Perform out-of-band setup before delegation.""" bootstrapper = BootstrapDocker( @@ -361,32 +948,62 @@ def setup(self): # type: () -> None setup_sh = bootstrapper.get_script() shell = setup_sh.splitlines()[0][2:] - docker_exec(self.args, self.container_name, [shell], data=setup_sh) + try: + docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False) + except SubprocessError: + display.info(f'Checking container "{self.container_name}" logs...') + docker_logs(self.args, self.container_name) + raise def deprovision(self): # type: () -> None """Deprovision the host after delegation has completed.""" - if not self.container_name: - return # provision was never called or did not succeed, so there is no container to remove - - if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success): - docker_rm(self.args, self.container_name) + container_exists = False + + if self.container_name: + if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success): + docker_rm(self.args, self.container_name) + else: + container_exists = True + + if self.cgroup_path: + if container_exists: + display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing. ' + f'Then run `{shlex.join(self.delete_systemd_cgroup_v1_command)}` on the container host.') + else: + self.delete_systemd_cgroup_v1() + elif container_exists: + display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing.') def wait(self): # type: () -> None """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets.""" if not self.controller: con = self.get_controller_target_connections()[0] + last_error = '' - for dummy in range(1, 60): + for dummy in range(1, 10): try: con.run(['id'], capture=True) except SubprocessError as ex: if 'Permission denied' in ex.message: raise + last_error = str(ex) time.sleep(1) else: return + display.info('Checking SSH debug output...') + display.info(last_error) + + if not self.args.delegate and not self.args.host_path: + def callback() -> None: + """Callback to run during error display.""" + self.on_target_failure() # when the controller is not delegated, report failures immediately + else: + callback = None + + raise HostConnectionError(f'Timeout waiting for {self.config.name} container {self.container_name}.', callback) + def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" containers = get_container_database(self.args) @@ -402,6 +1019,10 @@ def get_controller_target_connections(self): # type: () -> t.List[SshConnection port=port, identity_file=SshKey(self.args).key, python_interpreter=self.python.path, + # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here. + # A substring is used to allow custom containers to work, not just the one provided with ansible-test. 
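For example, the substring match applied on the next line covers the default image and custom images alike (image names and tags hypothetical):

    'centos6' in 'quay.io/ansible/centos6-test-container:4.0.1'    # True
    'centos6' in 'registry.example.com/custom/centos6:latest'      # True
    'centos6' in 'quay.io/ansible/centos7-test-container:4.0.1'    # False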
+ enable_rsa_sha1='centos6' in self.config.image, ) return [SshConnection(self.args, settings)] @@ -414,13 +1035,46 @@ def get_working_directory(self): # type: () -> str """Return the working directory for the host.""" return '/root' - def get_docker_run_options(self): # type: () -> t.List[str] + def on_target_failure(self) -> None: + """Executed during failure handling if this profile is a target.""" + display.info(f'Checking container "{self.container_name}" logs...') + + try: + docker_logs(self.args, self.container_name) + except SubprocessError as ex: + display.error(str(ex)) + + if self.config.cgroup != CGroupVersion.NONE: + # Containers with cgroup support are assumed to be running systemd. + display.info(f'Checking container "{self.container_name}" systemd logs...') + + try: + docker_exec(self.args, self.container_name, ['journalctl'], capture=False) + except SubprocessError as ex: + display.error(str(ex)) + + display.error(f'Connection to container "{self.container_name}" failed. See logs and original error above.') + + def get_common_run_options(self) -> list[str]: """Return a list of options needed to run the container.""" options = [ - '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro', - f'--privileged={str(self.config.privileged).lower()}', + # These temporary mount points need to be created at run time when using Docker. + # They are automatically provided by Podman, but will be overridden by VOLUME instructions for the container, if they exist. + # If supporting containers with VOLUME instructions is not desired, these options could be limited to use with Docker. + # See: https://github.com/containers/podman/pull/1318 + # Previously they were handled by the VOLUME instruction during container image creation. + # However, that approach creates anonymous volumes when running the container, which are then left behind after the container is deleted. + # These options eliminate the need for the VOLUME instruction, and override it if they are present. + # The mount options used are those typically found on Linux systems. + # Of special note is the "exec" option for "/tmp", which is required by ansible-test for path injection of executables using temporary directories. + '--tmpfs', '/tmp:exec', + '--tmpfs', '/run:exec', + '--tmpfs', '/run/lock', # some systemd containers require a separate tmpfs here, such as Ubuntu 20.04 and Ubuntu 22.04 ] + if self.config.privileged: + options.append('--privileged') + if self.config.memory: options.extend([ f'--memory={self.config.memory}', @@ -448,7 +1102,7 @@ def wait(self): # type: () -> None """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets.""" self.wait_until_ready() - def get_inventory_variables(self): + def get_inventory_variables(self): # type: () -> t.Dict[str, t.Optional[t.Union[str, int]]] """Return inventory variables for accessing this host.""" core_ci = self.wait_for_instance() connection = core_ci.connection @@ -460,8 +1114,15 @@ def get_inventory_variables(self): ansible_port=connection.port, ansible_user=connection.username, ansible_ssh_private_key_file=core_ci.ssh_key.key, + # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later. + # IOS CSR 1000V uses an ancient SSH server, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later. + # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here. 
+ # NOTE: This option only exists in ansible-core 2.14 and later. For older ansible-core versions, use of Paramiko 2.8.x or earlier is required. + # See: https://github.com/ansible/ansible/pull/78789 + # See: https://github.com/ansible/ansible/pull/78842 + ansible_paramiko_use_rsa_sha2_algorithms='no', ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform, - ) + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] return variables @@ -483,13 +1144,14 @@ def wait_until_ready(self): # type: () -> None for dummy in range(1, 90): try: - intercept_python(self.args, self.args.controller_python, cmd, env) - except SubprocessError: + intercept_python(self.args, self.args.controller_python, cmd, env, capture=True) + except SubprocessError as ex: + display.warning(str(ex)) time.sleep(10) else: return - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" @@ -501,6 +1163,10 @@ def get_controller_target_connections(self): # type: () -> t.List[SshConnection port=core_ci.connection.port, user=core_ci.connection.username, identity_file=core_ci.ssh_key.key, + # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # IOS CSR 1000V uses an ancient SSH server, making it incompatible with the default configuration of OpenSSH 8.8 and later clients. + # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here. + enable_rsa_sha1=True, ) return [SshConnection(self.args, settings)] @@ -546,7 +1212,7 @@ def configure(self): # type: () -> None shell = setup_sh.splitlines()[0][2:] ssh = self.get_origin_controller_connection() - ssh.run([shell], data=setup_sh) + ssh.run([shell], data=setup_sh, capture=False) def get_ssh_connection(self): # type: () -> SshConnection """Return an SSH connection for accessing the host.""" @@ -562,15 +1228,12 @@ def get_ssh_connection(self): # type: () -> SshConnection ) if settings.user == 'root': - become = None - elif self.config.platform == 'freebsd': - become = Su() - elif self.config.platform == 'macos': - become = Sudo() - elif self.config.platform == 'rhel': - become = Sudo() + become = None # type: t.Optional[Become] + elif self.config.become: + become = SUPPORTED_BECOME_METHODS[self.config.become]() else: - raise NotImplementedError(f'Become support has not been implemented for platform "{self.config.platform}" and user "{settings.user}" is not root.') + display.warning(f'Defaulting to "sudo" for platform "{self.config.platform}" become support.', unique=True) + become = Sudo() return SshConnection(self.args, settings, become) @@ -582,12 +1245,12 @@ def wait_until_ready(self): # type: () -> str try: return self.get_working_directory() except SubprocessError as ex: - if 'Permission denied' in ex.message: - raise - + # No "Permission denied" check is performed here. + # Unlike containers, with remote instances, user configuration isn't guaranteed to have been completed before SSH connections are attempted. 
+ display.warning(str(ex)) time.sleep(10) - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" @@ -672,7 +1335,7 @@ def wait(self): # type: () -> None """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets.""" self.wait_until_ready() - def get_inventory_variables(self): + def get_inventory_variables(self): # type: () -> t.Dict[str, t.Optional[t.Union[str, int]]] """Return inventory variables for accessing this host.""" core_ci = self.wait_for_instance() connection = core_ci.connection @@ -686,7 +1349,7 @@ def get_inventory_variables(self): ansible_user=connection.username, ansible_password=connection.password, ansible_ssh_private_key_file=core_ci.ssh_key.key, - ) + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] # HACK: force 2016 to use NTLM + HTTP message encryption if self.config.version == '2016': @@ -716,13 +1379,14 @@ def wait_until_ready(self): # type: () -> None for dummy in range(1, 120): try: - intercept_python(self.args, self.args.controller_python, cmd, env) - except SubprocessError: + intercept_python(self.args, self.args.controller_python, cmd, env, capture=True) + except SubprocessError as ex: + display.warning(str(ex)) time.sleep(10) else: return - raise ApplicationError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') + raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.') def get_controller_target_connections(self): # type: () -> t.List[SshConnection] """Return SSH connection(s) for accessing the host as a target from the controller.""" diff --git a/test/lib/ansible_test/_internal/inventory.py b/test/lib/ansible_test/_internal/inventory.py index 73a9ae9c38b3cc..7e930040c0d6e5 100644 --- a/test/lib/ansible_test/_internal/inventory.py +++ b/test/lib/ansible_test/_internal/inventory.py @@ -25,6 +25,10 @@ WindowsRemoteProfile, ) +from .ssh import ( + ssh_options_to_str, +) + def create_controller_inventory(args, path, controller_host): # type: (EnvironmentConfig, str, ControllerHostProfile) -> None """Create and return inventory for use in controller-only integration tests.""" @@ -94,7 +98,7 @@ def create_network_inventory(args, path, target_hosts): # type: (EnvironmentCon return target_hosts = t.cast(t.List[NetworkRemoteProfile], target_hosts) - host_groups = {target_host.config.platform: {} for target_host in target_hosts} + host_groups = {target_host.config.platform: {} for target_host in target_hosts} # type: t.Dict[str, t.Dict[str, t.Dict[str, t.Union[str, int]]]] for target_host in target_hosts: host_groups[target_host.config.platform][sanitize_host_name(target_host.config.name)] = target_host.get_inventory_variables() @@ -149,7 +153,8 @@ def create_posix_inventory(args, path, target_hosts, needs_ssh=False): # type: ansible_port=ssh.settings.port, ansible_user=ssh.settings.user, ansible_ssh_private_key_file=ssh.settings.identity_file, - ) + ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options), + ) # type: t.Dict[str, t.Optional[t.Union[str, int]]] if ssh.become: testhost.update( diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py index 9d3301a147a2ad..df8c98d4987189 
100644 --- a/test/lib/ansible_test/_internal/io.py +++ b/test/lib/ansible_test/_internal/io.py @@ -14,17 +14,17 @@ ) -def read_json_file(path): # type: (t.AnyStr) -> t.Any +def read_json_file(path): # type: (str) -> t.Any """Parse and return the json content from the specified path.""" return json.loads(read_text_file(path)) -def read_text_file(path): # type: (t.AnyStr) -> t.Text +def read_text_file(path): # type: (str) -> t.Text """Return the contents of the specified path as text.""" return to_text(read_binary_file(path)) -def read_binary_file(path): # type: (t.AnyStr) -> bytes +def read_binary_file(path): # type: (str) -> bytes """Return the contents of the specified path as bytes.""" with open_binary_file(path) as file_obj: return file_obj.read() @@ -43,7 +43,7 @@ def write_json_file(path, # type: str content, # type: t.Any create_directories=False, # type: bool formatted=True, # type: bool - encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] + encoder=None, # type: t.Optional[t.Type[json.JSONEncoder]] ): # type: (...) -> str """Write the given json content to the specified path, optionally creating missing directories.""" text_content = json.dumps(content, @@ -67,21 +67,19 @@ def write_text_file(path, content, create_directories=False): # type: (str, str file_obj.write(to_bytes(content)) -def open_text_file(path, mode='r'): # type: (str, str) -> t.TextIO +def open_text_file(path, mode='r'): # type: (str, str) -> t.IO[str] """Open the given path for text access.""" if 'b' in mode: raise Exception('mode cannot include "b" for text files: %s' % mode) - # noinspection PyTypeChecker return io.open(to_bytes(path), mode, encoding=ENCODING) # pylint: disable=consider-using-with -def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO +def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes] """Open the given path for binary access.""" if 'b' not in mode: raise Exception('mode must include "b" for binary files: %s' % mode) - # noinspection PyTypeChecker return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py index 769ec8348b6f24..e7f82b0aacbb6c 100644 --- a/test/lib/ansible_test/_internal/metadata.py +++ b/test/lib/ansible_test/_internal/metadata.py @@ -21,8 +21,8 @@ class Metadata: """Metadata object for passing data to delegated tests.""" def __init__(self): """Initialize metadata.""" - self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]] - self.cloud_config = None # type: t.Optional[t.Dict[str, str]] + self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int], ...]] + self.cloud_config = None # type: t.Optional[t.Dict[str, t.Dict[str, t.Union[int, str, bool]]]] self.change_description = None # type: t.Optional[ChangeDescription] self.ci_provider = None # type: t.Optional[str] diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py index d92f9f6589f056..e6ccc6ed5ffe66 100644 --- a/test/lib/ansible_test/_internal/payload.py +++ b/test/lib/ansible_test/_internal/payload.py @@ -34,8 +34,8 @@ ) # improve performance by disabling uid/gid lookups -tarfile.pwd = None -tarfile.grp = None +tarfile.pwd = None # type: ignore[attr-defined] # undocumented attribute +tarfile.grp = None # type: ignore[attr-defined] # undocumented attribute def create_payload(args, dst_path): # type: (CommonConfig, str) -> None @@ -69,8 +69,8 @@ def make_executable(tar_info): # type: 
(tarfile.TarInfo) -> t.Optional[tarfile. collection_layouts = data_context().create_collection_layouts() - content_files = [] - extra_files = [] + content_files = [] # type: t.List[t.Tuple[str, str]] + extra_files = [] # type: t.List[t.Tuple[str, str]] for layout in collection_layouts: if layout == data_context().content: diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py index e8972ac87c49e9..783461426589bb 100644 --- a/test/lib/ansible_test/_internal/provider/__init__.py +++ b/test/lib/ansible_test/_internal/provider/__init__.py @@ -16,7 +16,7 @@ def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) - return sorted(get_subclasses(provider_type), key=lambda c: (c.priority, c.__name__)) -def find_path_provider(provider_type, # type: t.Type[TPathProvider], +def find_path_provider(provider_type, # type: t.Type[TPathProvider] provider_classes, # type: t.List[t.Type[TPathProvider]] path, # type: str walk, # type: bool diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py index 147fcbd56fee00..9fd13550e5eeb7 100644 --- a/test/lib/ansible_test/_internal/provider/layout/__init__.py +++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py @@ -91,6 +91,7 @@ def __init__(self, unit_module_path, # type: str unit_module_utils_path, # type: str unit_messages, # type: t.Optional[LayoutMessages] + unsupported=False, # type: bool ): # type: (...) -> None super().__init__(root, paths) @@ -108,6 +109,7 @@ def __init__(self, self.unit_module_path = unit_module_path self.unit_module_utils_path = unit_module_utils_path self.unit_messages = unit_messages + self.unsupported = unsupported self.is_ansible = root == ANSIBLE_SOURCE_ROOT @@ -204,7 +206,7 @@ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]] """Return a filesystem tree from the given list of paths.""" - tree = {}, [] + tree = {}, [] # type: t.Tuple[t.Dict[str, t.Any], t.List[str]] for path in paths: parts = path.split(os.path.sep) diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py index 5dca046f02bedb..6b826ee4a30397 100644 --- a/test/lib/ansible_test/_internal/provider/layout/collection.py +++ b/test/lib/ansible_test/_internal/provider/layout/collection.py @@ -11,6 +11,10 @@ LayoutMessages, ) +from ...util import ( + is_valid_identifier, +) + class CollectionLayout(LayoutProvider): """Layout provider for Ansible collections.""" @@ -28,6 +32,10 @@ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout collection_root = os.path.dirname(os.path.dirname(root)) collection_dir = os.path.relpath(root, collection_root) + + collection_namespace: str + collection_name: str + collection_namespace, collection_name = collection_dir.split(os.path.sep) collection_root = os.path.dirname(collection_root) @@ -65,6 +73,7 @@ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout unit_module_path='tests/unit/plugins/modules', unit_module_utils_path='tests/unit/plugins/module_utils', unit_messages=unit_messages, + unsupported=not(is_valid_identifier(collection_namespace) and is_valid_identifier(collection_name)), ) @staticmethod diff --git a/test/lib/ansible_test/_internal/provider/layout/unsupported.py 
b/test/lib/ansible_test/_internal/provider/layout/unsupported.py new file mode 100644 index 00000000000000..80a9129198b2ad --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/layout/unsupported.py @@ -0,0 +1,42 @@ +"""Layout provider for an unsupported directory layout.""" +from __future__ import annotations + +import typing as t + +from . import ( + ContentLayout, + LayoutProvider, +) + + +class UnsupportedLayout(LayoutProvider): + """Layout provider for an unsupported directory layout.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout + """Create a Layout using the given root and paths.""" + plugin_paths = dict((p, p) for p in self.PLUGIN_TYPES) + + return ContentLayout(root, + paths, + plugin_paths=plugin_paths, + collection=None, + test_path='', + results_path='', + sanity_path='', + sanity_messages=None, + integration_path='', + integration_targets_path='', + integration_vars_path='', + integration_messages=None, + unit_path='', + unit_module_path='', + unit_module_utils_path='', + unit_messages=None, + unsupported=True, + ) diff --git a/test/lib/ansible_test/_internal/provider/source/unsupported.py b/test/lib/ansible_test/_internal/provider/source/unsupported.py new file mode 100644 index 00000000000000..ff5562c62c6537 --- /dev/null +++ b/test/lib/ansible_test/_internal/provider/source/unsupported.py @@ -0,0 +1,22 @@ +"""Source provider to use when the layout is unsupported.""" +from __future__ import annotations + +import typing as t + +from . import ( + SourceProvider, +) + + +class UnsupportedSource(SourceProvider): + """Source provider to use when the layout is unsupported.""" + sequence = 0 # disable automatic detection + + @staticmethod + def is_content_root(path): # type: (str) -> bool + """Return True if the given path is a content root for this provider.""" + return False + + def get_paths(self, path): # type: (str) -> t.List[str] + """Return the list of available content paths under the given path.""" + return [] diff --git a/test/lib/ansible_test/_internal/provisioning.py b/test/lib/ansible_test/_internal/provisioning.py index a95360360b3395..5a5361ed60ebe3 100644 --- a/test/lib/ansible_test/_internal/provisioning.py +++ b/test/lib/ansible_test/_internal/provisioning.py @@ -18,10 +18,12 @@ from .util import ( ApplicationError, + HostConnectionError, display, open_binary_file, verify_sys_executable, version_to_str, + type_guard, ) from .thread import ( @@ -88,17 +90,16 @@ def targets(self, profile_type): # type: (t.Type[THostProfile]) -> t.List[THost if not self.target_profiles: raise Exception('No target profiles found.') - if not all(isinstance(target, profile_type) for target in self.target_profiles): - raise Exception(f'Target profile(s) are not of the required type: {profile_type}') + assert type_guard(self.target_profiles, profile_type) - return self.target_profiles + return t.cast(t.List[THostProfile], self.target_profiles) def prepare_profiles( args, # type: TEnvironmentConfig targets_use_pypi=False, # type: bool skip_setup=False, # type: bool - requirements=None, # type: t.Optional[t.Callable[[TEnvironmentConfig, HostState], None]] + requirements=None, # type: t.Optional[t.Callable[[HostProfile], None]] ): # type: (...) -> HostState """ Create new profiles, or load existing ones, and return them. 
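The `type_guard` helper used in the `targets()` method above is imported from `util`, but its implementation is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming the `TypeGuard` fallback import added to `util.py` later in this patch (an illustration, not the actual implementation):

    from __future__ import annotations

    import typing as t

    try:
        from typing_extensions import TypeGuard  # backport needed on Python releases without typing.TypeGuard
    except ImportError:
        TypeGuard = None  # safe with `from __future__ import annotations`, since annotations are not evaluated

    C = t.TypeVar('C')


    def type_guard(sequence: t.Sequence[t.Any], guard_type: t.Type[C]) -> TypeGuard[t.Sequence[C]]:
        """Return True if all items in the sequence match the given type, narrowing the sequence type for the type checker."""
        return all(isinstance(item, guard_type) for item in sequence)

A helper along these lines lets `assert type_guard(...)` replace the previous isinstance loop while still giving the type checker enough information to validate the subsequent `t.cast`.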
@@ -138,7 +139,7 @@ def provision(profile): # type: (HostProfile) -> None check_controller_python(args, host_state) if requirements: - requirements(args, host_state) + requirements(host_state.controller_profile) def configure(profile): # type: (HostProfile) -> None """Configure the given profile.""" @@ -147,6 +148,9 @@ def configure(profile): # type: (HostProfile) -> None if not skip_setup: profile.configure() + if requirements: + requirements(profile) + dispatch_jobs([(profile, WrappedThread(functools.partial(configure, profile))) for profile in host_state.target_profiles]) return host_state @@ -184,13 +188,26 @@ def dispatch_jobs(jobs): # type: (t.List[t.Tuple[HostProfile, WrappedThread]]) time.sleep(1) failed = False + connection_failures = 0 for profile, thread in jobs: try: thread.wait_for_result() + except HostConnectionError as ex: + display.error(f'Host {profile.config} connection failed:\n{ex}') + failed = True + connection_failures += 1 + except ApplicationError as ex: + display.error(f'Host {profile.config} job failed:\n{ex}') + failed = True except Exception as ex: # pylint: disable=broad-except - display.error(f'Host {profile} job failed: {ex}\n{"".join(traceback.format_tb(ex.__traceback__))}') + name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}' + display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n' + f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}') failed = True + if connection_failures: + raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.') + if failed: raise ApplicationError('Host job(s) failed. See previous error(s) for details.') diff --git a/test/lib/ansible_test/_internal/pypi_proxy.py b/test/lib/ansible_test/_internal/pypi_proxy.py index 968794fd2080f0..e31db6dcc1521b 100644 --- a/test/lib/ansible_test/_internal/pypi_proxy.py +++ b/test/lib/ansible_test/_internal/pypi_proxy.py @@ -124,7 +124,8 @@ def cleanup_pypi_proxy(): force = 'yes' if profile.config.is_managed else 'no' - run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', dict(pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force), capture=True) + run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', capture=True, variables=dict( + pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force)) atexit.register(cleanup_pypi_proxy) diff --git a/test/lib/ansible_test/_internal/python_requirements.py b/test/lib/ansible_test/_internal/python_requirements.py index aaaf44b8b3f99f..eed177c3932eeb 100644 --- a/test/lib/ansible_test/_internal/python_requirements.py +++ b/test/lib/ansible_test/_internal/python_requirements.py @@ -142,9 +142,9 @@ def install_requirements( if ansible: try: - ansible_cache = install_requirements.ansible_cache + ansible_cache = install_requirements.ansible_cache # type: ignore[attr-defined] except AttributeError: - ansible_cache = install_requirements.ansible_cache = {} + ansible_cache = install_requirements.ansible_cache = {} # type: ignore[attr-defined] ansible_installed = ansible_cache.get(python.path) @@ -262,7 +262,7 @@ def run_pip( if not args.explain: try: - connection.run([python.path], data=script) + connection.run([python.path], data=script, capture=False) except SubprocessError: script = prepare_pip_script([PipVersion()]) @@ -492,7 +492,7 @@ def prepare_pip_script(commands): # type: (t.List[PipCommand]) -> str def 
usable_pip_file(path): # type: (t.Optional[str]) -> bool """Return True if the specified pip file is usable, otherwise False.""" - return path and os.path.exists(path) and os.path.getsize(path) + return bool(path) and os.path.exists(path) and bool(os.path.getsize(path)) # Cryptography diff --git a/test/lib/ansible_test/_internal/ssh.py b/test/lib/ansible_test/_internal/ssh.py index 21212dc1aa1f62..b5fcd5a813a1ad 100644 --- a/test/lib/ansible_test/_internal/ssh.py +++ b/test/lib/ansible_test/_internal/ssh.py @@ -2,6 +2,7 @@ from __future__ import annotations import dataclasses +import itertools import json import os import random @@ -38,16 +39,46 @@ class SshConnectionDetail: identity_file: str python_interpreter: t.Optional[str] = None shell_type: t.Optional[str] = None + enable_rsa_sha1: bool = False def __post_init__(self): self.name = sanitize_host_name(self.name) + @property + def options(self) -> dict[str, str]: + """OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument.""" + options: dict[str, str] = {} + + if self.enable_rsa_sha1: + # Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support. + # OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa). + # OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms. + # See: https://www.openssh.com/txt/release-8.8 + algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later + + options.update(dict( + # Host key signature algorithms that the client wants to use. + # Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients. + # This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix. + # See: https://www.openssh.com/txt/release-7.0 + HostKeyAlgorithms=algorithms, + # Signature algorithms that will be used for public key authentication. + # Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients. + # This option was added in OpenSSH 7.0, released on 2015-08-11. + # See: https://www.openssh.com/txt/release-7.0 + # This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5. + # See: https://www.openssh.com/txt/release-8.5 + PubkeyAcceptedKeyTypes=algorithms, + )) + + return options + class SshProcess: """Wrapper around an SSH process.""" def __init__(self, process): # type: (t.Optional[subprocess.Popen]) -> None self._process = process - self.pending_forwards = None # type: t.Optional[t.Set[t.Tuple[str, int]]] + self.pending_forwards = None # type: t.Optional[t.List[t.Tuple[str, int]]] self.forwards = {} # type: t.Dict[t.Tuple[str, int], int] @@ -71,7 +102,7 @@ def wait(self): # type: () -> None def collect_port_forwards(self): # type: (SshProcess) -> t.Dict[t.Tuple[str, int], int] """Collect port assignments for dynamic SSH port forwards.""" - errors = [] + errors = [] # type: t.List[str] display.info('Collecting %d SSH port forward(s).' 
% len(self.pending_forwards), verbosity=2) @@ -107,7 +138,7 @@ def collect_port_forwards(self): # type: (SshProcess) -> t.Dict[t.Tuple[str, in dst = (dst_host, dst_port) else: # explain mode - dst = list(self.pending_forwards)[0] + dst = self.pending_forwards[0] src_port = random.randint(40000, 50000) self.pending_forwards.remove(dst) @@ -141,7 +172,7 @@ def create_ssh_command( if ssh.user: cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine - ssh_options = dict( + ssh_options: dict[str, t.Union[int, str]] = dict( BatchMode='yes', ExitOnForwardFailure='yes', LogLevel='ERROR', @@ -153,9 +184,7 @@ def create_ssh_command( ssh_options.update(options or {}) - for key, value in sorted(ssh_options.items()): - cmd.extend(['-o', '='.join([key, str(value)])]) - + cmd.extend(ssh_options_to_list(ssh_options)) cmd.extend(cli_args or []) cmd.append(ssh.host) @@ -165,6 +194,18 @@ def create_ssh_command( return cmd +def ssh_options_to_list(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> list[str]: + """Format a dictionary of SSH options as a list suitable for passing to the `ssh` command.""" + return list(itertools.chain.from_iterable( + ('-o', f'{key}={value}') for key, value in sorted(options.items()) + )) + + +def ssh_options_to_str(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> str: + """Format a dictionary of SSH options as a string suitable for passing as `ansible_ssh_extra_args` in inventory.""" + return shlex.join(ssh_options_to_list(options)) + + def run_ssh_command( args, # type: EnvironmentConfig ssh, # type: SshConnectionDetail @@ -202,7 +243,7 @@ def create_ssh_port_forwards( """ options = dict( LogLevel='INFO', # info level required to get messages on stderr indicating the ports assigned to each forward - ) + ) # type: t.Dict[str, t.Union[str, int]] cli_args = [] @@ -221,7 +262,7 @@ def create_ssh_port_redirects( redirects, # type: t.List[t.Tuple[int, str, int]] ): # type: (...) -> SshProcess """Create SSH port redirections using the provided list of tuples (bind_port, target_host, target_port).""" - options = {} + options = {} # type: t.Dict[str, t.Union[str, int]] cli_args = [] for bind_port, target_host, target_port in redirects: @@ -245,7 +286,7 @@ def generate_ssh_inventory(ssh_connections): # type: (t.List[SshConnectionDetai ansible_pipelining='yes', ansible_python_interpreter=ssh.python_interpreter, ansible_shell_type=ssh.shell_type, - ansible_ssh_extra_args='-o UserKnownHostsFile=/dev/null', # avoid changing the test environment + ansible_ssh_extra_args=ssh_options_to_str(dict(UserKnownHostsFile='/dev/null', **ssh.options)), # avoid changing the test environment ansible_ssh_host_key_checking='no', ))) for ssh in ssh_connections), ), diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py index ced111f784fa7c..6b29605d921c57 100644 --- a/test/lib/ansible_test/_internal/target.py +++ b/test/lib/ansible_test/_internal/target.py @@ -155,7 +155,7 @@ def walk_units_targets(): # type: () -> t.Iterable[TestTarget] return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_') -def walk_compile_targets(include_symlinks=True): # type: (bool) -> t.Iterable[TestTarget, ...] 
+def walk_compile_targets(include_symlinks=True): # type: (bool) -> t.Iterable[TestTarget] """Return an iterable of compile targets.""" return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks) @@ -611,6 +611,9 @@ def __init__(self, path, modules, prefixes): # type: (str, t.FrozenSet[str], t. groups += [a for a in static_aliases if a not in modules] groups += ['module/%s' % m for m in self.modules] + if data_context().content.is_ansible and (self.name == 'ansible-test' or self.name.startswith('ansible-test-')): + groups.append('ansible-test') + if not self.modules: groups.append('non_module') @@ -699,6 +702,8 @@ def __init__(self, path, modules, prefixes): # type: (str, t.FrozenSet[str], t. # configuration + self.retry_never = 'retry/never/' in self.aliases + self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/')))) self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/')))) self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/')))) diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py index 2ebda60eafc8e4..05ec5b595930c5 100644 --- a/test/lib/ansible_test/_internal/test.py +++ b/test/lib/ansible_test/_internal/test.py @@ -218,7 +218,7 @@ def __init__( command, # type: str test, # type: str python_version=None, # type: t.Optional[str] - messages=None, # type: t.Optional[t.List[TestMessage]] + messages=None, # type: t.Optional[t.Sequence[TestMessage]] summary=None, # type: t.Optional[str] ): super().__init__(command, test, python_version) @@ -264,10 +264,10 @@ def write_lint(self): # type: () -> None message = 'The test `%s` failed. See stderr output for details.' % command path = '' message = TestMessage(message, path) - print(message) + print(message) # display goes to stderr, this should be on stdout else: for message in self.messages: - print(message) + print(message) # display goes to stderr, this should be on stdout def write_junit(self, args): # type: (TestConfig) -> None """Write results to a junit XML file.""" diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py index 1b2fbec2b84d5b..601f60e44d1adf 100644 --- a/test/lib/ansible_test/_internal/thread.py +++ b/test/lib/ansible_test/_internal/thread.py @@ -1,6 +1,8 @@ """Python threading tools.""" from __future__ import annotations +import collections.abc as c +import contextlib import functools import sys import threading @@ -8,14 +10,14 @@ import typing as t -TCallable = t.TypeVar('TCallable', bound=t.Callable) +TCallable = t.TypeVar('TCallable', bound=t.Callable[..., t.Any]) class WrappedThread(threading.Thread): """Wrapper around Thread which captures results and exceptions.""" def __init__(self, action): # type: (t.Callable[[], t.Any]) -> None super().__init__() - self._result = queue.Queue() + self._result = queue.Queue() # type: queue.Queue[t.Any] self.action = action self.result = None @@ -25,8 +27,8 @@ def run(self): Do not override. Do not call directly. Executed by the start() method. """ # We truly want to catch anything that the worker thread might do including call sys.exit. 
- # Therefore we catch *everything* (including old-style class exceptions)
- # noinspection PyBroadException, PyPep8
+ # Therefore, we catch *everything* (including old-style class exceptions)
+ # noinspection PyBroadException
 try:
 self._result.put((self.action(), None))
 # pylint: disable=locally-disabled, bare-except
@@ -41,10 +43,7 @@ def wait_for_result(self):
 result, exception = self._result.get()
 
 if exception:
- if sys.version_info[0] > 2:
- raise exception[1].with_traceback(exception[2])
- # noinspection PyRedundantParentheses
- exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used
+ raise exception[1].with_traceback(exception[2])
 
 self.result = result
@@ -61,4 +60,26 @@ def wrapper(*args, **kwargs):
 with lock:
 return func(*args, **kwargs)
 
- return wrapper
+ return wrapper # type: ignore[return-value] # requires https://www.python.org/dev/peps/pep-0612/ support
+
+
+__named_lock = threading.Lock()
+__named_locks: dict[str, threading.Lock] = {}
+
+
+@contextlib.contextmanager
+def named_lock(name: str) -> c.Iterator[bool]:
+ """
+ Context manager that provides named locks using threading.Lock instances.
+ Once named lock instances are created they are not deleted.
+ Returns True if this is the first instance of the named lock, otherwise False.
+ """
+ with __named_lock:
+ if lock_instance := __named_locks.get(name):
+ first = False
+ else:
+ first = True
+ lock_instance = __named_locks[name] = threading.Lock()
+
+ with lock_instance:
+ yield first
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
index fdd921e113ae00..ce710cdcd2758e 100644
--- a/test/lib/ansible_test/_internal/util.py
+++ b/test/lib/ansible_test/_internal/util.py
@@ -1,15 +1,20 @@
 """Miscellaneous utility functions and classes."""
 from __future__ import annotations
 
+import abc
 import errno
+import enum
 import fcntl
+import importlib.util
 import inspect
+import json
+import keyword
 import os
+import platform
 import pkgutil
 import random
 import re
 import shutil
-import socket
 import stat
 import string
 import subprocess
@@ -22,6 +27,11 @@
 from struct import unpack, pack
 from termios import TIOCGWINSZ
 
+try:
+ from typing_extensions import TypeGuard # TypeGuard was added in Python 3.10
+except ImportError:
+ TypeGuard = None
+
 from .encoding import (
 to_bytes,
 to_optional_bytes,
@@ -35,6 +45,7 @@
 
 from .thread import (
 mutex,
+ WrappedThread,
 )
 
 from .constants import (
@@ -48,12 +59,6 @@
 
 PYTHON_PATHS = {} # type: t.Dict[str, str]
 
-try:
- # noinspection PyUnresolvedReferences
- MAXFD = subprocess.MAXFD
-except AttributeError:
- MAXFD = -1
-
 COVERAGE_CONFIG_NAME = 'coveragerc'
 
 ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -79,6 +84,7 @@
 ANSIBLE_TEST_TARGET_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'target')
 
 ANSIBLE_TEST_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'tools')
+ANSIBLE_TEST_TARGET_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'tools')
 
 # Modes are set to allow all users the same level of access.
 # This permits files to be used in tests that change users.
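As a usage sketch (the call site and lock name are hypothetical, not from the patch), the `named_lock` context manager added to `thread.py` above makes it straightforward to serialize one-time work across threads, in line with the "prevent concurrent / repeat pulls" fix noted in the changelog:

    def pull_image_once(image: str) -> None:
        """Pull the given image at most once, even when called concurrently from multiple threads."""
        with named_lock(f'pull_image:{image}') as first:
            if first:
                ...  # perform the pull; concurrent callers block here, then skip the work

Since lock instances are never deleted, the lock name also serves as a persistent "already done" key for the lifetime of the process.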
@@ -95,6 +101,41 @@ MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH +class OutputStream(enum.Enum): + """The output stream to use when running a subprocess and redirecting/capturing stdout or stderr.""" + + ORIGINAL = enum.auto() + AUTO = enum.auto() + + def get_buffer(self, original: t.BinaryIO) -> t.BinaryIO: + """Return the correct output buffer to use, taking into account the given original buffer.""" + + if self == OutputStream.ORIGINAL: + return original + + if self == OutputStream.AUTO: + return display.fd.buffer + + raise NotImplementedError(str(self)) + + +class Architecture: + """ + Normalized architecture names. + These are the architectures supported by ansible-test, such as when provisioning remote instances. + """ + X86_64 = 'x86_64' + AARCH64 = 'aarch64' + + +REMOTE_ARCHITECTURES = list(value for key, value in Architecture.__dict__.items() if not key.startswith('__')) + + +def is_valid_identifier(value: str) -> bool: + """Return True if the given value is a valid non-keyword Python identifier, otherwise return False.""" + return value.isidentifier() and not keyword.iskeyword(value) + + def cache(func): # type: (t.Callable[[], TValue]) -> t.Callable[[], TValue] """Enforce exclusive access on a decorated function and cache the result.""" storage = {} # type: t.Dict[None, TValue] @@ -113,6 +154,58 @@ def cache_func(): return wrapper +@mutex +def detect_architecture(python: str) -> t.Optional[str]: + """Detect the architecture of the specified Python and return a normalized version, or None if it cannot be determined.""" + results: t.Dict[str, t.Optional[str]] + + try: + results = detect_architecture.results # type: ignore[attr-defined] + except AttributeError: + results = detect_architecture.results = {} # type: ignore[attr-defined] + + if python in results: + return results[python] + + if python == sys.executable or os.path.realpath(python) == os.path.realpath(sys.executable): + uname = platform.uname() + else: + data = raw_command([python, '-c', 'import json, platform; print(json.dumps(platform.uname()));'], capture=True)[0] + uname = json.loads(data) + + translation = { + 'x86_64': Architecture.X86_64, # Linux, macOS + 'amd64': Architecture.X86_64, # FreeBSD + 'aarch64': Architecture.AARCH64, # Linux, FreeBSD + 'arm64': Architecture.AARCH64, # FreeBSD + } + + candidates = [] + + if len(uname) >= 5: + candidates.append(uname[4]) + + if len(uname) >= 6: + candidates.append(uname[5]) + + candidates = sorted(set(candidates)) + architectures = sorted(set(arch for arch in [translation.get(candidate) for candidate in candidates] if arch)) + + architecture: t.Optional[str] = None + + if not architectures: + display.warning(f'Unable to determine architecture for Python interpreter "{python}" from: {candidates}') + elif len(architectures) == 1: + architecture = architectures[0] + display.info(f'Detected architecture {architecture} for Python interpreter: {python}', verbosity=1) + else: + display.warning(f'Conflicting architectures detected ({architectures}) for Python interpreter "{python}" from: {candidates}') + + results[python] = architecture + + return architecture + + def filter_args(args, filters): # type: (t.List[str], t.Dict[str, int]) -> t.List[str] """Return a filtered version of the given command line arguments.""" remaining = 0 @@ -248,18 +341,46 @@ def get_available_python_versions(): # type: () -> t.Dict[str, str] def raw_command( cmd, # type: t.Iterable[str] - capture=False, # type: bool + capture, # type: bool env=None, # type: 
t.Optional[t.Dict[str, str]] data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] explain=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.Union[t.IO[bytes], int]] + stdout=None, # type: t.Optional[t.Union[t.IO[bytes], int]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] cmd_verbosity=1, # type: int str_errors='strict', # type: str error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]] ): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return stdout and stderr as a tuple.""" + output_stream = output_stream or OutputStream.AUTO + + if capture and interactive: + raise InternalError('Cannot combine capture=True with interactive=True.') + + if data and interactive: + raise InternalError('Cannot combine data with interactive=True.') + + if stdin and interactive: + raise InternalError('Cannot combine stdin with interactive=True.') + + if stdout and interactive: + raise InternalError('Cannot combine stdout with interactive=True.') + + if stdin and data: + raise InternalError('Cannot combine stdin with data.') + + if stdout and not capture: + raise InternalError('Redirection of stdout requires capture=True to avoid redirection of stderr to stdout.') + + if output_stream != OutputStream.AUTO and capture: + raise InternalError(f'Cannot combine {output_stream=} with capture=True.') + + if output_stream != OutputStream.AUTO and interactive: + raise InternalError(f'Cannot combine {output_stream=} with interactive=True.') + if not cwd: cwd = os.getcwd() @@ -270,7 +391,30 @@ def raw_command( escaped_cmd = ' '.join(shlex.quote(c) for c in cmd) - display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True) + if capture: + description = 'Run' + elif interactive: + description = 'Interactive' + else: + description = 'Stream' + + description += ' command' + + with_types = [] + + if data: + with_types.append('data') + + if stdin: + with_types.append('stdin') + + if stdout: + with_types.append('stdout') + + if with_types: + description += f' with {"/".join(with_types)}' + + display.info(f'{description}: {escaped_cmd}', verbosity=cmd_verbosity, truncate=True) display.info('Working directory: %s' % cwd, verbosity=2) program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning') @@ -288,17 +432,23 @@ def raw_command( if stdin is not None: data = None - communicate = True elif data is not None: stdin = subprocess.PIPE communicate = True - - if stdout: - communicate = True - - if capture: + elif interactive: + pass # allow the subprocess access to our stdin + else: + stdin = subprocess.DEVNULL + + if not interactive: + # When not running interactively, send subprocess stdout/stderr through a pipe. + # This isolates the stdout/stderr of the subprocess from the current process, and also hides the current TTY from it, if any. + # This prevents subprocesses from sharing stdout/stderr with the current process or each other. + # Doing so allows subprocesses to safely make changes to their file handles, such as making them non-blocking (ssh does this). + # This also maintains consistency between local testing and CI systems, which typically do not provide a TTY. + # To maintain output ordering, a single pipe is used for both stdout/stderr when not capturing output unless the output stream is ORIGINAL. 
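The ordering behavior described above can be illustrated with a minimal standalone example (not taken from this patch): pointing stderr at stdout's pipe preserves the order in which the subprocess wrote to the two streams.

    import subprocess

    # Share one pipe for stdout and stderr so their output interleaves in write order.
    process = subprocess.Popen(['sh', '-c', 'echo out; echo err >&2'],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    merged, _ = process.communicate()  # the second value is None: stderr was merged into stdout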
stdout = stdout or subprocess.PIPE - stderr = subprocess.PIPE + stderr = subprocess.PIPE if capture or output_stream == OutputStream.ORIGINAL else subprocess.STDOUT communicate = True else: stderr = None @@ -318,7 +468,8 @@ def raw_command( if communicate: data_bytes = to_optional_bytes(data) - stdout_bytes, stderr_bytes = process.communicate(data_bytes) + stdout_bytes, stderr_bytes = communicate_with_process(process, data_bytes, stdout == subprocess.PIPE, stderr == subprocess.PIPE, capture=capture, + output_stream=output_stream) stdout_text = to_optional_text(stdout_bytes, str_errors) or u'' stderr_text = to_optional_text(stderr_bytes, str_errors) or u'' else: @@ -341,6 +492,122 @@ def raw_command( raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime, error_callback) +def communicate_with_process( + process: subprocess.Popen, + stdin: t.Optional[bytes], + stdout: bool, + stderr: bool, + capture: bool, + output_stream: OutputStream, +) -> t.Tuple[bytes, bytes]: + """Communicate with the specified process, handling stdin/stdout/stderr as requested.""" + threads: t.List[WrappedThread] = [] + reader: t.Type[ReaderThread] + + if capture: + reader = CaptureThread + else: + reader = OutputThread + + if stdin is not None: + threads.append(WriterThread(process.stdin, stdin)) + + if stdout: + stdout_reader = reader(process.stdout, output_stream.get_buffer(sys.stdout.buffer)) + threads.append(stdout_reader) + else: + stdout_reader = None + + if stderr: + stderr_reader = reader(process.stderr, output_stream.get_buffer(sys.stderr.buffer)) + threads.append(stderr_reader) + else: + stderr_reader = None + + for thread in threads: + thread.start() + + for thread in threads: + try: + thread.wait_for_result() + except Exception as ex: # pylint: disable=broad-except + display.error(str(ex)) + + if isinstance(stdout_reader, ReaderThread): + stdout_bytes = b''.join(stdout_reader.lines) + else: + stdout_bytes = b'' + + if isinstance(stderr_reader, ReaderThread): + stderr_bytes = b''.join(stderr_reader.lines) + else: + stderr_bytes = b'' + + process.wait() + + return stdout_bytes, stderr_bytes + + +class WriterThread(WrappedThread): + """Thread to write data to stdin of a subprocess.""" + def __init__(self, handle: t.IO[bytes], data: bytes) -> None: + super().__init__(self._run) + + self.handle = handle + self.data = data + + def _run(self) -> None: + """Workload to run on a thread.""" + try: + self.handle.write(self.data) + self.handle.flush() + finally: + self.handle.close() + + +class ReaderThread(WrappedThread, metaclass=abc.ABCMeta): + """Thread to read stdout from a subprocess.""" + def __init__(self, handle: t.IO[bytes], buffer: t.BinaryIO) -> None: + super().__init__(self._run) + + self.handle = handle + self.buffer = buffer + self.lines = [] # type: t.List[bytes] + + @abc.abstractmethod + def _run(self) -> None: + """Workload to run on a thread.""" + + +class CaptureThread(ReaderThread): + """Thread to capture stdout from a subprocess into a buffer.""" + def _run(self) -> None: + """Workload to run on a thread.""" + src = self.handle + dst = self.lines + + try: + for line in src: + dst.append(line) + finally: + src.close() + + +class OutputThread(ReaderThread): + """Thread to pass stdout from a subprocess to stdout.""" + def _run(self) -> None: + """Workload to run on a thread.""" + src = self.handle + dst = self.buffer + + try: + for line in src: + dst.write(line) + dst.flush() + finally: + src.close() + + def common_environment(): """Common environment used for executing all 
programs.""" env = dict( @@ -404,6 +671,16 @@ def pass_vars(required, optional): # type: (t.Collection[str], t.Collection[str return env +def verified_chmod(path: str, mode: int) -> None: + """Perform chmod on the specified path and then verify the permissions were applied.""" + os.chmod(path, mode) # pylint: disable=ansible-bad-function + + executable = any(mode & perm for perm in (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)) + + if executable and not os.access(path, os.X_OK): + raise ApplicationError(f'Path "{path}" should executable, but is not. Is the filesystem mounted with the "noexec" option?') + + def remove_tree(path): # type: (str) -> None """Remove the specified directory, siliently continuing if the directory does not exist.""" try: @@ -466,7 +743,6 @@ def is_binary_file(path): # type: (str) -> bool return True with open_binary_file(path) as path_fd: - # noinspection PyTypeChecker return b'\0' in path_fd.read(4096) @@ -514,7 +790,7 @@ def __init__(self): self.color = sys.stdout.isatty() self.warnings = [] self.warnings_unique = set() - self.info_stderr = False + self.fd = sys.stderr # default to stderr until config is initialized to avoid early messages going to stdout self.rows = 0 self.columns = 0 self.truncate = 0 @@ -526,7 +802,7 @@ def __init__(self): def __warning(self, message): # type: (str) -> None """Internal implementation for displaying a warning message.""" - self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr) + self.print_message('WARNING: %s' % message, color=self.purple) def review_warnings(self): # type: () -> None """Review all warnings which previously occurred.""" @@ -554,23 +830,27 @@ def warning(self, message, unique=False, verbosity=0): # type: (str, bool, int) def notice(self, message): # type: (str) -> None """Display a notice level message.""" - self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr) + self.print_message('NOTICE: %s' % message, color=self.purple) def error(self, message): # type: (str) -> None """Display an error level message.""" - self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr) + self.print_message('ERROR: %s' % message, color=self.red) + + def fatal(self, message): # type: (str) -> None + """Display a fatal level message.""" + self.print_message('FATAL: %s' % message, color=self.red, stderr=True) def info(self, message, verbosity=0, truncate=False): # type: (str, int, bool) -> None """Display an info level message.""" if self.verbosity >= verbosity: color = self.verbosity_colors.get(verbosity, self.yellow) - self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate) + self.print_message(message, color=color, truncate=truncate) def print_message( # pylint: disable=locally-disabled, invalid-name self, message, # type: str color=None, # type: t.Optional[str] - fd=sys.stdout, # type: t.TextIO + stderr=False, # type: bool truncate=False, # type: bool ): # type: (...) 
-> None """Display a message.""" @@ -590,13 +870,18 @@ def print_message( # pylint: disable=locally-disabled, invalid-name message = message.replace(self.clear, color) message = '%s%s%s' % (color, message, self.clear) - if sys.version_info[0] == 2: - message = to_bytes(message) + fd = sys.stderr if stderr else self.fd print(message, file=fd) fd.flush() +class InternalError(Exception): + """An unhandled internal error indicating a bug in the code.""" + def __init__(self, message: str) -> None: + super().__init__(f'An internal error has occurred in ansible-test: {message}') + + class ApplicationError(Exception): """General application error.""" @@ -649,12 +934,32 @@ def __init__(self, name): # type: (str) -> None self.name = name -def retry(func, ex_type=SubprocessError, sleep=10, attempts=10): +class HostConnectionError(ApplicationError): + """ + Raised when the initial connection during host profile setup has failed and all retries have been exhausted. + Raised by provisioning code when one or more provisioning threads raise this exception. + Also raised when an SSH connection fails for the shell command. + """ + def __init__(self, message: str, callback: t.Callable[[], None] = None) -> None: + super().__init__(message) + + self._callback = callback + + def run_callback(self) -> None: + """Run the error callback, if any.""" + if self._callback: + self._callback() + + +def retry(func, ex_type=SubprocessError, sleep=10, attempts=10, warn=True): """Retry the specified function on failure.""" for dummy in range(1, attempts): try: return func() - except ex_type: + except ex_type as ex: + if warn: + display.warning(str(ex)) + time.sleep(sleep) return func() @@ -771,23 +1076,10 @@ def load_module(path, name): # type: (str, str) -> None if name in sys.modules: return - if sys.version_info >= (3, 4): - import importlib.util - - spec = importlib.util.spec_from_file_location(name, path) - module = importlib.util.module_from_spec(spec) - # noinspection PyUnresolvedReferences - spec.loader.exec_module(module) - - sys.modules[name] = module - else: - # noinspection PyDeprecation - import imp # pylint: disable=deprecated-module - - # load_source (and thus load_module) require a file opened with `open` in text mode - with open(to_bytes(path)) as module_file: - # noinspection PyDeprecation - imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE)) + spec = importlib.util.spec_from_file_location(name, path) + module = importlib.util.module_from_spec(spec) + sys.modules[name] = module + spec.loader.exec_module(module) def sanitize_host_name(name): @@ -795,18 +1087,6 @@ def sanitize_host_name(name): return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-') -@cache -def get_host_ip(): - """Return the host's IP address.""" - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: - sock.connect(('10.255.255.255', 22)) - host_ip = get_host_ip.ip = sock.getsockname()[0] - - display.info('Detected host IP: %s' % host_ip, verbosity=1) - - return host_ip - - def get_generic_type(base_type, generic_base_type): # type: (t.Type, t.Type[TType]) -> t.Optional[t.Type[TType]] """Return the generic type arg derived from the generic_base_type type that is associated with the base_type type, if any, otherwise return None.""" # noinspection PyUnresolvedReferences @@ -840,4 +1120,19 @@ def verify_sys_executable(path): # type: (str) -> t.Optional[str] return expected_executable +def type_guard(sequence: t.Sequence[t.Any], guard_type: t.Type[C]) -> TypeGuard[t.Sequence[C]]: + """ + Raises an exception if any 
item in the given sequence does not match the specified guard type. + Use with assert so that type checkers are aware of the type guard. + """ + invalid_types = set(type(item) for item in sequence if not isinstance(item, guard_type)) + + if not invalid_types: + return True + + invalid_type_names = sorted(str(item) for item in invalid_types) + + raise Exception(f'Sequence required to contain only {guard_type} includes: {", ".join(invalid_type_names)}') + + display = Display() # pylint: disable=locally-disabled, invalid-name diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py index f77040b17087db..ecf8ae6676445f 100644 --- a/test/lib/ansible_test/_internal/util_common.py +++ b/test/lib/ansible_test/_internal/util_common.py @@ -28,14 +28,16 @@ MODE_DIRECTORY, MODE_FILE_EXECUTE, MODE_FILE, + OutputStream, PYTHON_PATHS, raw_command, ANSIBLE_TEST_DATA_ROOT, ANSIBLE_TEST_TARGET_ROOT, - ANSIBLE_TEST_TOOLS_ROOT, + ANSIBLE_TEST_TARGET_TOOLS_ROOT, ApplicationError, SubprocessError, generate_name, + verified_chmod, ) from .io import ( @@ -58,7 +60,7 @@ VirtualPythonConfig, ) -CHECK_YAML_VERSIONS = {} +CHECK_YAML_VERSIONS = {} # type: t.Dict[str, t.Any] class ShellScriptTemplate: @@ -66,7 +68,7 @@ class ShellScriptTemplate: def __init__(self, template): # type: (t.Text) -> None self.template = template - def substitute(self, **kwargs): # type: (t.Dict[str, t.Union[str, t.List[str]]]) -> str + def substitute(self, **kwargs: t.Union[str, t.List[str]]) -> str: """Return a string templated with the given arguments.""" kvp = dict((k, self.quote(v)) for k, v in kwargs.items()) pattern = re.compile(r'#{(?P<name>[^}]+)}') @@ -127,6 +129,8 @@ class CommonConfig: """Configuration common to all commands.""" def __init__(self, args, command): # type: (t.Any, str) -> None self.command = command + self.interactive = False + self.check_layout = True self.success = None # type: t.Optional[bool] self.color = args.color # type: bool @@ -136,11 +140,11 @@ def __init__(self, args, command): # type: (t.Any, str) -> None self.truncate = args.truncate # type: int self.redact = args.redact # type: bool - self.info_stderr = False # type: bool + self.display_stderr = False # type: bool self.session_name = generate_name() - self.cache = {} + self.cache = {} # type: t.Dict[str, t.Any] def get_ansible_config(self): # type: () -> str """Return the path to the Ansible config for the given config.""" @@ -220,15 +224,8 @@ def process_scoped_temporary_directory(args, prefix='ansible-test-', suffix=None @contextlib.contextmanager -def named_temporary_file(args, prefix, suffix, directory, content): - """ - :param args: CommonConfig - :param prefix: str - :param suffix: str - :param directory: str - :param content: str | bytes | unicode - :rtype: str - """ +def named_temporary_file(args, prefix, suffix, directory, content): # type: (CommonConfig, str, str, t.Optional[str], str) -> t.Iterator[str] + """Context manager for a named temporary file.""" if args.explain: yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix)) else: @@ -243,7 +240,7 @@ def write_json_test_results(category, # type: ResultType name, # type: str content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]] formatted=True, # type: bool - encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]] + encoder=None, # type: t.Optional[t.Type[json.JSONEncoder]] ): # type: (...) 
-> None """Write the given json content to the specified test results path, creating directories as needed.""" path = os.path.join(category.path, name) @@ -286,9 +283,9 @@ def get_injector_path(): # type: () -> str script = set_shebang(script, shebang) write_text_file(dst, script) - os.chmod(dst, mode) + verified_chmod(dst, mode) - os.chmod(injector_path, MODE_DIRECTORY) + verified_chmod(injector_path, MODE_DIRECTORY) def cleanup_injector(): """Remove the temporary injector directory.""" @@ -349,7 +346,7 @@ def get_python_path(interpreter): # type: (str) -> str create_interpreter_wrapper(interpreter, injected_interpreter) - os.chmod(python_path, MODE_DIRECTORY) + verified_chmod(python_path, MODE_DIRECTORY) if not PYTHON_PATHS: atexit.register(cleanup_python_paths) @@ -387,7 +384,7 @@ def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str write_text_file(injected_interpreter, code) - os.chmod(injected_interpreter, MODE_FILE_EXECUTE) + verified_chmod(injected_interpreter, MODE_FILE_EXECUTE) def cleanup_python_paths(): @@ -402,7 +399,7 @@ def intercept_python( python, # type: PythonConfig cmd, # type: t.List[str] env, # type: t.Dict[str, str] - capture=False, # type: bool + capture, # type: bool data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] always=False, # type: bool @@ -432,26 +429,28 @@ def intercept_python( def run_command( args, # type: CommonConfig cmd, # type: t.Iterable[str] - capture=False, # type: bool + capture, # type: bool env=None, # type: t.Optional[t.Dict[str, str]] data=None, # type: t.Optional[str] cwd=None, # type: t.Optional[str] always=False, # type: bool - stdin=None, # type: t.Optional[t.BinaryIO] - stdout=None, # type: t.Optional[t.BinaryIO] + stdin=None, # type: t.Optional[t.IO[bytes]] + stdout=None, # type: t.Optional[t.IO[bytes]] + interactive=False, # type: bool + output_stream=None, # type: t.Optional[OutputStream] cmd_verbosity=1, # type: int str_errors='strict', # type: str error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]] ): # type: (...) 
-> t.Tuple[t.Optional[str], t.Optional[str]] """Run the specified command and return stdout and stderr as a tuple.""" explain = args.explain and not always - return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, - cmd_verbosity=cmd_verbosity, str_errors=str_errors, error_callback=error_callback) + return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, interactive=interactive, + output_stream=output_stream, cmd_verbosity=cmd_verbosity, str_errors=str_errors, error_callback=error_callback) def yamlcheck(python): """Return True if PyYAML has libyaml support, False if it does not and None if it was not found.""" - result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0]) + result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0]) if not result['yaml']: return None diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py index cf436775bdb001..a50f9b54e40190 100644 --- a/test/lib/ansible_test/_internal/venv.py +++ b/test/lib/ansible_test/_internal/venv.py @@ -15,11 +15,12 @@ find_python, SubprocessError, get_available_python_versions, - ANSIBLE_TEST_TOOLS_ROOT, + ANSIBLE_TEST_TARGET_TOOLS_ROOT, display, remove_tree, ApplicationError, str_to_version, + raw_command, ) from .util_common import ( @@ -92,7 +93,7 @@ def create_virtual_environment(args, # type: EnvironmentConfig # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results # in a copy of the original virtual environment instead of creation of a new one # avoid this issue by only using "real" python interpreters to invoke 'venv' - for real_python in iterate_real_pythons(args, python.version): + for real_python in iterate_real_pythons(python.version): if run_venv(args, real_python, system_site_packages, pip, path): display.info('Created Python %s virtual environment using "venv": %s' % (python.version, path), verbosity=1) return True @@ -132,7 +133,7 @@ def create_virtual_environment(args, # type: EnvironmentConfig return False -def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str] +def iterate_real_pythons(version): # type: (str) -> t.Iterable[str] """ Iterate through available real python interpreters of the requested version. The current interpreter will be checked and then the path will be searched. @@ -142,7 +143,7 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. if version_info == sys.version_info[:len(version_info)]: current_python = sys.executable - real_prefix = get_python_real_prefix(args, current_python) + real_prefix = get_python_real_prefix(current_python) if real_prefix: current_python = find_python(version, os.path.join(real_prefix, 'bin')) @@ -163,7 +164,7 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. if found_python == current_python: return - real_prefix = get_python_real_prefix(args, found_python) + real_prefix = get_python_real_prefix(found_python) if real_prefix: found_python = find_python(version, os.path.join(real_prefix, 'bin')) @@ -172,12 +173,12 @@ def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t. 
yield found_python -def get_python_real_prefix(args, python_path): # type: (EnvironmentConfig, str) -> t.Optional[str] +def get_python_real_prefix(python_path): # type: (str) -> t.Optional[str] """ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'. """ - cmd = [python_path, os.path.join(os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'virtualenvcheck.py'))] - check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0]) + cmd = [python_path, os.path.join(os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'virtualenvcheck.py'))] + check_result = json.loads(raw_command(cmd, capture=True)[0]) real_prefix = check_result['real_prefix'] return real_prefix @@ -205,7 +206,7 @@ def run_venv(args, # type: EnvironmentConfig remove_tree(path) if args.verbosity > 1: - display.error(ex) + display.error(ex.message) return False @@ -241,7 +242,7 @@ def run_virtualenv(args, # type: EnvironmentConfig remove_tree(path) if args.verbosity > 1: - display.error(ex) + display.error(ex.message) return False @@ -249,11 +250,11 @@ def run_virtualenv(args, # type: EnvironmentConfig def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]] - """Get the virtualenv version for the given python intepreter, if available, otherwise return None.""" + """Get the virtualenv version for the given python interpreter, if available, otherwise return None.""" try: - cache = get_virtualenv_version.cache + cache = get_virtualenv_version.cache # type: ignore[attr-defined] except AttributeError: - cache = get_virtualenv_version.cache = {} + cache = get_virtualenv_version.cache = {} # type: ignore[attr-defined] if python not in cache: try: @@ -262,7 +263,7 @@ def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t stdout = '' if args.verbosity > 1: - display.error(ex) + display.error(ex.message) version = None diff --git a/test/lib/ansible_test/_util/__init__.py b/test/lib/ansible_test/_util/__init__.py index d6fc0a8614c065..527d413a98d8f7 100644 --- a/test/lib/ansible_test/_util/__init__.py +++ b/test/lib/ansible_test/_util/__init__.py @@ -1,3 +1,2 @@ -"""Nearly empty __init__.py to allow importing under Python 2.x.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x. +# This allows the ansible-test entry point to report supported Python versions before exiting. 
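For reference, detect_architecture() in util.py and get_virtualenv_version() in venv.py above both memoize by storing the cache dictionary as an attribute on the function object itself, using `# type: ignore[attr-defined]` because type checkers do not know about dynamically added function attributes. A minimal runnable sketch of the pattern follows; the names get_tool_version and probe_tool_version are illustrative only, not part of this patch.

import typing as t


def get_tool_version(path: str) -> t.Optional[str]:
    """Return the version of the tool at the given path, probing it only on the first call per path."""
    try:
        cache = get_tool_version.cache  # type: ignore[attr-defined]
    except AttributeError:
        # First call: create the cache as an attribute on the function object.
        cache = get_tool_version.cache = {}  # type: ignore[attr-defined]

    if path not in cache:
        cache[path] = probe_tool_version(path)  # expensive probe, performed once per path

    return cache[path]


def probe_tool_version(path: str) -> t.Optional[str]:
    """Stand-in for an expensive operation, such as running a subprocess."""
    return '1.0' if path else None

Compared to a module-level dict, this keeps the cache discoverable from the function itself; detect_architecture() additionally guards the lookup with @mutex so concurrent first calls cannot race.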
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py index e19b4d98a40552..a319d1a12e4ca2 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py @@ -1,6 +1,5 @@ """Test to verify action plugins have an associated module to provide documentation.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py index 1875ab3aa44dab..983eaeb4266bc3 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys @@ -48,7 +47,11 @@ def main(): env = os.environ.copy() env.update(PYTHONPATH='%s:%s' % (os.path.join(os.path.dirname(__file__), 'changelog'), env['PYTHONPATH'])) - subprocess.call(cmd, env=env) # ignore the return code, rely on the output instead + # ignore the return code, rely on the output instead + process = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, text=True, env=env, check=False) + + sys.stdout.write(process.stdout) + sys.stderr.write(process.stderr) if __name__ == '__main__': diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py index 000c29e4e977f1..7eab0f573a1bea 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py @@ -1,5 +1,4 @@ """Block the sphinx module from being loaded.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations raise ImportError('The sphinx module has been prevented from loading to maintain consistent test results.') diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py index 806c0e6ed1aac1..e0dd41cd5cd1bb 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py index cdad96551eda88..dcb02e7adb9241 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py 
b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py index 660b0fce858bae..99417b6e720154 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py index e3fba1f5ddb0c1..21cb0017dff386 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py index d6d710aeffb7c0..71883c9f6118f2 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py index 18a3f6d1d11754..bb564564ef25a3 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py index 7dfd5b260179a4..1b728de630c1b1 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py index 8925e831d29d53..9fe6e9ff5c0fb2 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py index 18134154389763..8a955636833aa6 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py @@ -1,5 +1,4 @@ -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py index 5a267ba0dfdf54..bf50a4d974214e 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py index 421bbd6229ad2f..36793f3f0c68bc 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py @@ -1,8 +1,7 @@ # a script to check for illegal filenames on various Operating Systems. The # main rules are derived from restrictions on Windows # https://msdn.microsoft.com/en-us/library/aa365247#naming_conventions -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import struct diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py index e5abd64db80027..020c95d6790d38 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py index 8399a36e0bd70e..1ac51710ead9ba 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py index bb8c8f01d828ca..2a3413870ed981 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py index 87575f51895967..8484047c1bd1ea 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type 
+from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py index 929f371f7a2266..74a2b936da1ed3 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py @@ -1,6 +1,5 @@ """Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import datetime import os diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py index 401af1aee6829a..beb0bf7072175d 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py index 5603051ac51902..4bd9d4bf466d46 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py index 68f380b0a9271d..0e2fcfa661dffd 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py index a8f0b879505fd8..d099748470898b 100644 --- a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py +++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py b/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py index 74a45f009f79cf..af11dd8a8be4a7 100644 --- a/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py +++ b/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py @@ -1,6 +1,5 @@ """Read YAML from stdin and write JSON to stdout.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import sys diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg 
b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg index 3c60aa77fe92a3..54f258a0370494 100644 --- a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg +++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg @@ -9,6 +9,7 @@ disable= no-self-use, raise-missing-from, # Python 2.x does not support raise from too-few-public-methods, + too-many-public-methods, too-many-arguments, too-many-branches, too-many-instance-attributes, diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py index 234ec217cda11f..851d8d75985bbb 100644 --- a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py +++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py @@ -2,8 +2,7 @@ # (c) 2018, Matt Martz <matt@sivel.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import datetime import re diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py index 3b9a37e549cf1d..934a9ae70ca13d 100644 --- a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py +++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py @@ -2,8 +2,7 @@ # (c) 2018, Matt Martz <matt@sivel.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import astroid from pylint.interfaces import IAstroidChecker diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py index 75a8b57fff8517..1be42f51f23642 100644 --- a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py +++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py @@ -1,7 +1,5 @@ """A plugin for pylint to identify imports and functions which should not be used.""" -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type +from __future__ import annotations import os import typing as t @@ -23,11 +21,13 @@ def __init__( modules_only=False, # type: bool names=None, # type: t.Optional[t.Tuple[str, ...]] ignore_paths=None, # type: t.Optional[t.Tuple[str, ...]] + ansible_test_only=False, # type: bool ): # type: (...) 
-> None self.alternative = alternative self.modules_only = modules_only self.names = set(names) if names else set() self.ignore_paths = ignore_paths + self.ansible_test_only = ansible_test_only def applies_to(self, path, name=None): # type: (str, t.Optional[str]) -> bool """Return True if this entry applies to the given path, otherwise return False.""" @@ -41,6 +41,9 @@ def applies_to(self, path, name=None): # type: (str, t.Optional[str]) -> bool if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths): return False + if self.ansible_test_only and '/test/lib/ansible_test/_internal/' not in path: + return False + if self.modules_only: return is_module_path(path) @@ -116,6 +119,10 @@ class AnsibleUnwantedChecker(BaseChecker): # see https://docs.python.org/3/library/tempfile.html#tempfile.mktemp 'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'), + # os.chmod resolves as posix.chmod + 'posix.chmod': UnwantedEntry('verified_chmod', + ansible_test_only=True), + 'sys.exit': UnwantedEntry('exit_json or fail_json', ignore_paths=( '/lib/ansible/module_utils/basic.py', diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/main.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/main.py index e6749cdc61d42d..ee7e832b489643 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/main.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/main.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from validate_modules.main import main diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py index d8ff2dc0d4fd72..c84ed8ac6b7408 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py @@ -15,6 +15,5 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations __version__ = '0.0.1b' diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py index f9eaa02af37873..f4d2a887e8e5e5 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import abc import argparse @@ -431,14 +430,13 @@ def _get_base_file(self): base_path = self._get_base_branch_module_path() command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)] - p = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = p.communicate() + p = subprocess.run(command, stdin=subprocess.DEVNULL, capture_output=True, check=False) + if int(p.returncode) != 0: return None t = tempfile.NamedTemporaryFile(delete=False) - t.write(stdout) + t.write(p.stdout) t.close() return t.name @@ -2424,11 +2422,12 @@ def _get_module_files(): @staticmethod def _git(args): cmd = ['git'] + args - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = p.communicate() + p = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, text=True, check=False) + if p.returncode != 0: - raise GitError(stderr, p.returncode) - return stdout.decode('utf-8').splitlines() + raise GitError(p.stderr, p.returncode) + + return p.stdout.splitlines() class GitError(Exception): diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py index 3846ee5df8a197..571329977326d5 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import runpy import inspect @@ -117,20 +116,18 @@ def get_ps_argument_spec(filename, collection): ps_dep_finder._add_module(name=b"Ansible.ModuleUtils.AddType", ext=".psm1", fqn=None, optional=False, wrapper=False) util_manifest = json.dumps({ - 'module_path': to_text(module_path, errors='surrogiate_or_strict'), + 'module_path': to_text(module_path, errors='surrogate_or_strict'), 'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'], 'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]), }) script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1') - proc = subprocess.Popen(['pwsh', script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=False) - stdout, stderr = proc.communicate() + proc = subprocess.run(['pwsh', script_path, util_manifest], stdin=subprocess.DEVNULL, capture_output=True, text=True, check=False) if proc.returncode != 0: - raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8'))) + raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (proc.stdout, proc.stderr)) - kwargs = json.loads(stdout) + kwargs = json.loads(proc.stdout) # the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS kwargs['argument_spec'] = kwargs.pop('options', {}) diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py index ed098cbc9caf27..66eb0ea7848c25 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py @@ -3,8 +3,7 @@ # Copyright: (c) 2015, Matt Martz <matt@sivel.net> # Copyright: (c) 2015, Rackspace US, Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py index ac46f6669f601f..5b20db8da9e3a4 100644 --- a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py +++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import datetime diff --git a/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py b/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py index 7f9df40a7eebbf..f4b361015fd8b4 100644 --- a/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py +++ b/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py @@ -1,6 +1,5 @@ """Wrapper around yamllint that supports YAML embedded in Ansible modules.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import json diff --git a/test/lib/ansible_test/_util/controller/tools/collection_detail.py b/test/lib/ansible_test/_util/controller/tools/collection_detail.py index e7c883ca016025..4ab6631ae211ae 100644 --- a/test/lib/ansible_test/_util/controller/tools/collection_detail.py +++ b/test/lib/ansible_test/_util/controller/tools/collection_detail.py @@ -1,6 +1,5 @@ """Retrieve collection detail.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os diff --git a/test/lib/ansible_test/_util/controller/tools/sslcheck.py b/test/lib/ansible_test/_util/controller/tools/sslcheck.py index 115c5ed25a8c75..c25fed6165ee04 100644 --- a/test/lib/ansible_test/_util/controller/tools/sslcheck.py +++ b/test/lib/ansible_test/_util/controller/tools/sslcheck.py @@ -1,6 +1,5 @@ """Show openssl version.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json diff --git a/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py b/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py index 1164168e3ebe75..e2a15bf00ce493 100644 --- a/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py +++ b/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py @@ -1,6 +1,5 @@ """Read YAML from stdin and write JSON to stdout.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import datetime import json diff --git a/test/lib/ansible_test/_util/target/__init__.py b/test/lib/ansible_test/_util/target/__init__.py index d6fc0a8614c065..527d413a98d8f7 100644 --- a/test/lib/ansible_test/_util/target/__init__.py +++ b/test/lib/ansible_test/_util/target/__init__.py @@ -1,3 +1,2 @@ -"""Nearly empty __init__.py to allow importing under Python 2.x.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x. +# This allows the ansible-test entry point to report supported Python versions before exiting. diff --git a/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py b/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py index dc31095a813497..286e045bcaa73e 100755 --- a/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py +++ b/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py @@ -27,6 +27,9 @@ def main(): raise SystemExit('This version of ansible-test cannot be executed with Python version %s. 
Supported Python versions are: %s' % ( version_to_str(sys.version_info[:3]), ', '.join(CONTROLLER_PYTHON_VERSIONS))) + if any(not os.get_blocking(handle.fileno()) for handle in (sys.stdin, sys.stdout, sys.stderr)): + raise SystemExit('Standard input, output and error file handles must be blocking to run ansible-test.') + # noinspection PyProtectedMember from ansible_test._internal import main as cli_main diff --git a/test/lib/ansible_test/_util/target/common/__init__.py b/test/lib/ansible_test/_util/target/common/__init__.py index d6fc0a8614c065..527d413a98d8f7 100644 --- a/test/lib/ansible_test/_util/target/common/__init__.py +++ b/test/lib/ansible_test/_util/target/common/__init__.py @@ -1,3 +1,2 @@ -"""Nearly empty __init__.py to allow importing under Python 2.x.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x. +# This allows the ansible-test entry point to report supported Python versions before exiting. diff --git a/test/lib/ansible_test/_util/target/setup/bootstrap.sh b/test/lib/ansible_test/_util/target/setup/bootstrap.sh index 53e2ca7177c28e..0427aac64fcf5e 100644 --- a/test/lib/ansible_test/_util/target/setup/bootstrap.sh +++ b/test/lib/ansible_test/_util/target/setup/bootstrap.sh @@ -80,30 +80,63 @@ pip_install() { done } -bootstrap_remote_aix() +bootstrap_remote_alpine() { - chfs -a size=1G / - chfs -a size=4G /usr - chfs -a size=1G /var - chfs -a size=1G /tmp - chfs -a size=2G /opt + py_pkg_prefix="py3" - if [ "${python_version}" = "2.7" ]; then - python_package_version="" - else - python_package_version="3" + packages=" + acl + bash + gcc + python3-dev + ${py_pkg_prefix}-pip + sudo + " + + if [ "${controller}" ]; then + packages=" + ${packages} + ${py_pkg_prefix}-cryptography + ${py_pkg_prefix}-packaging + ${py_pkg_prefix}-yaml + ${py_pkg_prefix}-jinja2 + ${py_pkg_prefix}-resolvelib + " fi + while true; do + # shellcheck disable=SC2086 + apk add -q ${packages} \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done +} + +bootstrap_remote_fedora() +{ + py_pkg_prefix="python3" + packages=" + acl gcc - python${python_package_version} - python${python_package_version}-devel - python${python_package_version}-pip + ${py_pkg_prefix}-devel " + if [ "${controller}" ]; then + packages=" + ${packages} + ${py_pkg_prefix}-cryptography + ${py_pkg_prefix}-jinja2 + ${py_pkg_prefix}-packaging + ${py_pkg_prefix}-pyyaml + ${py_pkg_prefix}-resolvelib + " + fi + while true; do # shellcheck disable=SC2086 - yum install -q -y ${packages} \ + dnf install -q -y ${packages} \ && break echo "Failed to install packages. Sleeping before trying again..." 
sleep 10 @@ -190,6 +223,19 @@ bootstrap_remote_freebsd() extra-index-url = https://spare-tire.testing.ansible.com/simple/ prefer-binary = yes " > /etc/pip.conf + + # enable ACL support on the root filesystem (required for become between unprivileged users) + fs_path="/" + fs_device="$(mount -v "${fs_path}" | cut -w -f 1)" + # shellcheck disable=SC2001 + fs_device_escaped=$(echo "${fs_device}" | sed 's|/|\\/|g') + + mount -o acls "${fs_device}" "${fs_path}" + awk 'BEGIN{FS=" "}; /'"${fs_device_escaped}"'/ {gsub(/^rw$/,"rw,acls", $4); print; next} // {print}' /etc/fstab > /etc/fstab.new + mv /etc/fstab.new /etc/fstab + + # enable sudo without a password for the wheel group, allowing ansible to use the sudo become plugin + echo '%wheel ALL=(ALL:ALL) NOPASSWD: ALL' > /usr/local/etc/sudoers.d/ansible-test } bootstrap_remote_macos() @@ -233,6 +279,8 @@ bootstrap_remote_rhel_7() done install_pip + + bootstrap_remote_rhel_pinned_pip_packages } bootstrap_remote_rhel_8() @@ -264,6 +312,38 @@ bootstrap_remote_rhel_8() echo "Failed to install packages. Sleeping before trying again..." sleep 10 done + + bootstrap_remote_rhel_pinned_pip_packages +} + +bootstrap_remote_rhel_9() +{ + py_pkg_prefix="python3" + + packages=" + gcc + ${py_pkg_prefix}-devel + " + + # Jinja2 is not installed with an OS package since the provided version is too old. + # Instead, ansible-test will install it using pip. + if [ "${controller}" ]; then + packages=" + ${packages} + ${py_pkg_prefix}-cryptography + ${py_pkg_prefix}-packaging + ${py_pkg_prefix}-pyyaml + ${py_pkg_prefix}-resolvelib + " + fi + + while true; do + # shellcheck disable=SC2086 + dnf install -q -y ${packages} \ + && break + echo "Failed to install packages. Sleeping before trying again..." + sleep 10 + done } bootstrap_remote_rhel() @@ -271,8 +351,12 @@ bootstrap_remote_rhel() case "${platform_version}" in 7.*) bootstrap_remote_rhel_7 ;; 8.*) bootstrap_remote_rhel_8 ;; + 9.*) bootstrap_remote_rhel_9 ;; esac +} +bootstrap_remote_rhel_pinned_pip_packages() +{ # pin packaging and pyparsing to match the downstream vendored versions pip_packages=" packaging==20.4 @@ -282,6 +366,63 @@ bootstrap_remote_rhel() pip_install "${pip_packages}" } +bootstrap_remote_ubuntu() +{ + py_pkg_prefix="python3" + + packages=" + acl + gcc + python${python_version}-dev + python3-pip + python${python_version}-venv + " + + if [ "${controller}" ]; then + cryptography_pkg="${py_pkg_prefix}-cryptography" + jinja2_pkg="${py_pkg_prefix}-jinja2" + packaging_pkg="${py_pkg_prefix}-packaging" + pyyaml_pkg="${py_pkg_prefix}-yaml" + resolvelib_pkg="${py_pkg_prefix}-resolvelib" + + # Declare platforms which do not have supporting OS packages available. + # For these ansible-test will use pip to install the requirements instead. + # Only the platform is checked since Ubuntu shares Python packages across Python versions. + case "${platform_version}" in + "20.04") + jinja2_pkg="" # too old + resolvelib_pkg="" # not available + ;; + esac + + packages=" + ${packages} + ${cryptography_pkg} + ${jinja2_pkg} + ${packaging_pkg} + ${pyyaml_pkg} + ${resolvelib_pkg} + " + fi + + while true; do + # shellcheck disable=SC2086 + apt-get update -qq -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --no-install-recommends ${packages} \ + && break + echo "Failed to install packages. Sleeping before trying again..." 
+ sleep 10 + done + + if [ "${controller}" ]; then + if [ "${platform_version}/${python_version}" = "20.04/3.9" ]; then + # Install pyyaml using pip so libyaml support is available on Python 3.9. + # The OS package install (which is installed by default) only has a .so file for Python 3.8. + pip_install "--upgrade pyyaml" + fi + fi +} + bootstrap_docker() { # Required for newer mysql-server packages to install/upgrade on Ubuntu 16.04. @@ -297,10 +438,12 @@ bootstrap_remote() python_package_version="$(echo "${python_version}" | tr -d '.')" case "${platform}" in - "aix") bootstrap_remote_aix ;; + "alpine") bootstrap_remote_alpine ;; + "fedora") bootstrap_remote_fedora ;; "freebsd") bootstrap_remote_freebsd ;; "macos") bootstrap_remote_macos ;; "rhel") bootstrap_remote_rhel ;; + "ubuntu") bootstrap_remote_ubuntu ;; esac done } @@ -313,6 +456,9 @@ bootstrap() install_ssh_keys customize_bashrc + # allow tests to detect ansible-test bootstrapped instances, as well as the bootstrap type + echo "${bootstrap_type}" > /etc/ansible-test.bootstrap + case "${bootstrap_type}" in "docker") bootstrap_docker ;; "remote") bootstrap_remote ;; diff --git a/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh new file mode 100644 index 00000000000000..3b05a3f444a710 --- /dev/null +++ b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh @@ -0,0 +1,17 @@ +# shellcheck shell=sh + +set -eu + +>&2 echo "@MARKER@" + +cgroup_path="$(awk -F: '$2 ~ /^name=systemd$/ { print "/sys/fs/cgroup/systemd"$3 }' /proc/1/cgroup)" + +if [ "${cgroup_path}" ] && [ -d "${cgroup_path}" ]; then + probe_path="${cgroup_path%/}/ansible-test-probe-@LABEL@" + mkdir "${probe_path}" + rmdir "${probe_path}" + exit 0 +fi + +>&2 echo "No systemd cgroup v1 hierarchy found" +exit 1 diff --git a/test/lib/ansible_test/_util/target/setup/probe_cgroups.py b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py new file mode 100644 index 00000000000000..2ac7ecb0849750 --- /dev/null +++ b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py @@ -0,0 +1,31 @@ +"""A tool for probing cgroups to determine write access.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import sys + + +def main(): # type: () -> None + """Main program entry point.""" + probe_dir = sys.argv[1] + paths = sys.argv[2:] + results = {} + + for path in paths: + probe_path = os.path.join(path, probe_dir) + + try: + os.mkdir(probe_path) + os.rmdir(probe_path) + except Exception as ex: # pylint: disable=broad-except + results[path] = str(ex) + else: + results[path] = None + + print(json.dumps(results, sort_keys=True)) + + +if __name__ == '__main__': + main() diff --git a/test/lib/ansible_test/_util/target/setup/requirements.py b/test/lib/ansible_test/_util/target/setup/requirements.py index 8bac926724ce0c..a7fd21b4843a25 100644 --- a/test/lib/ansible_test/_util/target/setup/requirements.py +++ b/test/lib/ansible_test/_util/target/setup/requirements.py @@ -291,12 +291,11 @@ def make_dirs(path): # type: (str) -> None raise -def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO +def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes] """Open the given path for binary access.""" if 'b' not in mode: raise Exception('mode must include "b" for binary files: %s' % mode) - # noinspection PyTypeChecker return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with diff 
--git a/test/lib/ansible_test/_util/controller/tools/virtualenvcheck.py b/test/lib/ansible_test/_util/target/tools/virtualenvcheck.py similarity index 100% rename from test/lib/ansible_test/_util/controller/tools/virtualenvcheck.py rename to test/lib/ansible_test/_util/target/tools/virtualenvcheck.py diff --git a/test/lib/ansible_test/_util/controller/tools/yamlcheck.py b/test/lib/ansible_test/_util/target/tools/yamlcheck.py similarity index 100% rename from test/lib/ansible_test/_util/controller/tools/yamlcheck.py rename to test/lib/ansible_test/_util/target/tools/yamlcheck.py diff --git a/test/sanity/code-smell/ansible-requirements.py b/test/sanity/code-smell/ansible-requirements.py index 48ecbaafdbd0e6..4d1a652f2b152c 100644 --- a/test/sanity/code-smell/ansible-requirements.py +++ b/test/sanity/code-smell/ansible-requirements.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/sanity/code-smell/ansible-test-future-boilerplate.json b/test/sanity/code-smell/ansible-test-future-boilerplate.json index e689ba5da1280d..ca4c067ab3214e 100644 --- a/test/sanity/code-smell/ansible-test-future-boilerplate.json +++ b/test/sanity/code-smell/ansible-test-future-boilerplate.json @@ -3,7 +3,8 @@ ".py" ], "prefixes": [ - "test/lib/ansible_test/_internal/" + "test/sanity/", + "test/lib/ansible_test/" ], "output": "path-message" } diff --git a/test/sanity/code-smell/ansible-test-future-boilerplate.py b/test/sanity/code-smell/ansible-test-future-boilerplate.py index 55092a73a3ddff..9a6222519b1789 100644 --- a/test/sanity/code-smell/ansible-test-future-boilerplate.py +++ b/test/sanity/code-smell/ansible-test-future-boilerplate.py @@ -1,12 +1,19 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import sys def main(): + # The following directories contain code which must work under Python 2.x. 
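+    # ('from __future__ import annotations' is a syntax error on Python 2.x, so those paths are exempted below.)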
+    py2_compat = (
+        'test/lib/ansible_test/_util/target/',
+    )
+
     for path in sys.argv[1:] or sys.stdin.read().splitlines():
+        if any(path.startswith(prefix) for prefix in py2_compat):
+            continue
+
         with open(path, 'rb') as path_fd:
             lines = path_fd.read().splitlines()

@@ -15,11 +22,16 @@ def main():
             # Files are allowed to be empty of everything including boilerplate
             missing = False

+        invalid_future = []
+
         for text in lines:
             if text == b'from __future__ import annotations':
                 missing = False
                 break

+            if text.startswith(b'from __future__ ') or text == b'__metaclass__ = type':
+                invalid_future.append(text.decode())
+
         if missing:
             with open(path) as file:
                 contents = file.read()
@@ -39,6 +51,9 @@ def main():
         if missing:
             print('%s: missing: from __future__ import annotations' % path)

+        for text in invalid_future:
+            print('%s: invalid: %s' % (path, text))
+

 if __name__ == '__main__':
     main()
diff --git a/test/sanity/code-smell/botmeta.py b/test/sanity/code-smell/botmeta.py
index 0ab5bf97edd0e7..985c8f9f0c2cf6 100644
--- a/test/sanity/code-smell/botmeta.py
+++ b/test/sanity/code-smell/botmeta.py
@@ -1,6 +1,5 @@
 """Make sure the data in BOTMETA.yml is valid"""
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

 import glob
 import os
diff --git a/test/sanity/code-smell/configure-remoting-ps1.py b/test/sanity/code-smell/configure-remoting-ps1.py
index bd2161067f5104..fe678008c1ee6e 100644
--- a/test/sanity/code-smell/configure-remoting-ps1.py
+++ b/test/sanity/code-smell/configure-remoting-ps1.py
@@ -1,5 +1,4 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations

 import os
diff --git a/test/sanity/code-smell/deprecated-config.py b/test/sanity/code-smell/deprecated-config.py
index 53cb2b93f24360..3c5c64592961d5 100644
--- a/test/sanity/code-smell/deprecated-config.py
+++ b/test/sanity/code-smell/deprecated-config.py
@@ -16,8 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import mmap import os diff --git a/test/sanity/code-smell/docs-build.py b/test/sanity/code-smell/docs-build.py index ff7d427a05d93f..aaa69378c7f17a 100644 --- a/test/sanity/code-smell/docs-build.py +++ b/test/sanity/code-smell/docs-build.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re @@ -30,13 +29,12 @@ def main(): try: cmd = ['make', 'core_singlehtmldocs'] - sphinx = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=docs_dir) - stdout, stderr = sphinx.communicate() + sphinx = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, cwd=docs_dir, check=False, text=True) finally: shutil.move(tmp, requirements_txt) - stdout = stdout.decode('utf-8') - stderr = stderr.decode('utf-8') + stdout = sphinx.stdout + stderr = sphinx.stderr if sphinx.returncode != 0: sys.stderr.write("Command '%s' failed with status code: %d\n" % (' '.join(cmd), sphinx.returncode)) diff --git a/test/sanity/code-smell/no-unwanted-files.py b/test/sanity/code-smell/no-unwanted-files.py index 1b55c23e6e289a..82f7aff0a0e3a7 100644 --- a/test/sanity/code-smell/no-unwanted-files.py +++ b/test/sanity/code-smell/no-unwanted-files.py @@ -1,6 +1,5 @@ """Prevent unwanted files from being added to the source tree.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys diff --git a/test/sanity/code-smell/obsolete-files.py b/test/sanity/code-smell/obsolete-files.py index 1fd980271cd0b6..3c1a4a4c726cf8 100644 --- a/test/sanity/code-smell/obsolete-files.py +++ b/test/sanity/code-smell/obsolete-files.py @@ -1,6 +1,5 @@ """Prevent files from being added to directories that are now obsolete.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys diff --git a/test/sanity/code-smell/package-data.py b/test/sanity/code-smell/package-data.py index 06f3f9165e0776..81f0c3087ccf8c 100644 --- a/test/sanity/code-smell/package-data.py +++ b/test/sanity/code-smell/package-data.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import contextlib import fnmatch @@ -161,14 +160,15 @@ def clean_repository(file_list): def create_sdist(tmp_dir): """Create an sdist in the repository""" - create = subprocess.Popen( + create = subprocess.run( ['make', 'snapshot', 'SDIST_DIR=%s' % tmp_dir], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, + stdin=subprocess.DEVNULL, + capture_output=True, + text=True, + check=False, ) - stderr = create.communicate()[1] + stderr = create.stderr if create.returncode != 0: raise Exception('make snapshot failed:\n%s' % stderr) @@ -209,15 +209,16 @@ def extract_sdist(sdist_path, tmp_dir): def install_sdist(tmp_dir, sdist_dir): """Install the extracted sdist into the temporary directory""" - install = subprocess.Popen( + install = subprocess.run( ['python', 'setup.py', 'install', '--root=%s' % tmp_dir], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, + stdin=subprocess.DEVNULL, + capture_output=True, + text=True, cwd=os.path.join(tmp_dir, sdist_dir), + check=False, ) - stdout, stderr = install.communicate() + stdout, 
stderr = install.stdout, install.stderr if install.returncode != 0: raise Exception('sdist install failed:\n%s' % stderr) diff --git a/test/sanity/code-smell/release-names.py b/test/sanity/code-smell/release-names.py index 4e145062027ef8..81d90d81c3a4e1 100644 --- a/test/sanity/code-smell/release-names.py +++ b/test/sanity/code-smell/release-names.py @@ -20,8 +20,7 @@ """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from yaml import safe_load diff --git a/test/sanity/code-smell/required-and-default-attributes.py b/test/sanity/code-smell/required-and-default-attributes.py index d71ddeeb297a24..900829dce7409e 100644 --- a/test/sanity/code-smell/required-and-default-attributes.py +++ b/test/sanity/code-smell/required-and-default-attributes.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/sanity/code-smell/rstcheck.py b/test/sanity/code-smell/rstcheck.py index 7f7028469f1121..99917ca80ef502 100644 --- a/test/sanity/code-smell/rstcheck.py +++ b/test/sanity/code-smell/rstcheck.py @@ -1,6 +1,5 @@ """Sanity test using rstcheck and sphinx.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import subprocess diff --git a/test/sanity/code-smell/test-constraints.py b/test/sanity/code-smell/test-constraints.py index 8383235e150468..9bd2438c5a629a 100644 --- a/test/sanity/code-smell/test-constraints.py +++ b/test/sanity/code-smell/test-constraints.py @@ -1,5 +1,4 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import sys diff --git a/test/sanity/code-smell/update-bundled.py b/test/sanity/code-smell/update-bundled.py index 009f801bfd06b9..4bad77a66725b0 100644 --- a/test/sanity/code-smell/update-bundled.py +++ b/test/sanity/code-smell/update-bundled.py @@ -21,8 +21,7 @@ """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import fnmatch import json diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt index 1dba93b03e48a6..034dd12f871c9e 100644 --- a/test/sanity/ignore.txt +++ b/test/sanity/ignore.txt @@ -132,6 +132,7 @@ lib/ansible/utils/collection_loader/_collection_meta.py pylint:deprecated-class test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level +test/integration/targets/ansible-test-no-tty/ansible_collections/ns/col/vendored_pty.py pep8!skip # vendored code test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-function # ignore, required for testing test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/files/bad.py pylint:ansible-bad-import # ignore, required for testing diff --git a/test/units/test_no_tty.py 
b/test/units/test_no_tty.py new file mode 100644 index 00000000000000..290c0b922ab806 --- /dev/null +++ b/test/units/test_no_tty.py @@ -0,0 +1,7 @@ +import sys + + +def test_no_tty(): + assert not sys.stdin.isatty() + assert not sys.stdout.isatty() + assert not sys.stderr.isatty() diff --git a/test/utils/shippable/aix.sh b/test/utils/shippable/alpine.sh similarity index 100% rename from test/utils/shippable/aix.sh rename to test/utils/shippable/alpine.sh diff --git a/test/utils/shippable/fedora.sh b/test/utils/shippable/fedora.sh new file mode 120000 index 00000000000000..6ddb77685452b4 --- /dev/null +++ b/test/utils/shippable/fedora.sh @@ -0,0 +1 @@ +remote.sh \ No newline at end of file diff --git a/test/utils/shippable/ubuntu.sh b/test/utils/shippable/ubuntu.sh new file mode 120000 index 00000000000000..6ddb77685452b4 --- /dev/null +++ b/test/utils/shippable/ubuntu.sh @@ -0,0 +1 @@ +remote.sh \ No newline at end of file
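
The Popen-to-run conversions in docs-build.py and package-data.py above follow one pattern: detach stdin, capture decoded output, and handle the return code manually so stderr can be included in the failure message. Below is a minimal sketch of that pattern using only the standard library; the function name and the command it runs are illustrative, not part of the patch.

from __future__ import annotations

import subprocess
import sys


def run_tool(cmd: list[str]) -> str:
    """Run a command with stdin detached, returning its decoded stdout."""
    # stdin=DEVNULL keeps the child from blocking on (or detecting) a TTY.
    # check=False leaves error reporting to the caller instead of raising CalledProcessError.
    completed = subprocess.run(
        cmd,
        stdin=subprocess.DEVNULL,
        capture_output=True,
        text=True,
        check=False,
    )

    if completed.returncode != 0:
        sys.stderr.write("Command '%s' failed with status code: %d\n" % (' '.join(cmd), completed.returncode))
        sys.stderr.write(completed.stderr)
        sys.exit(1)

    return completed.stdout


if __name__ == '__main__':
    print(run_tool([sys.executable, '--version']), end='')

Keeping check=False matches the converted call sites: both want the command's stderr in their own exception or error message rather than a bare CalledProcessError traceback.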