From c3b7ccc4cd39d37f5d5c842ff8736b22a0d500b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20El=20Amri?= Date: Sun, 12 Feb 2023 21:31:57 +0100 Subject: [PATCH] Rename module docker_compose2 to docker_compose_v2 --- plugins/modules/docker_compose2.py | 866 --------------- plugins/modules/docker_compose_v2.py | 1514 ++++++++++---------------- 2 files changed, 580 insertions(+), 1800 deletions(-) delete mode 100644 plugins/modules/docker_compose2.py diff --git a/plugins/modules/docker_compose2.py b/plugins/modules/docker_compose2.py deleted file mode 100644 index 4340426d6..000000000 --- a/plugins/modules/docker_compose2.py +++ /dev/null @@ -1,866 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -DOCUMENTATION = ''' - -module: docker_compose2 - -short_description: Manage multi-container Docker applications with Docker Compose. - -author: Léo El Amri (@lel-amri) - -description: - - Uses Docker Compose to start and shutdown services. - - Swarm mode is not supported (thus secrets and configs aren't supported). - - Configuration can be read from a Compose file or inline using the I(definition) option. - - See the examples for more details. - -attributes: - check_mode: - support: none - diff_mode: - support: none - -options: - docker_host: - description: - - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the - TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, - the module will automatically replace C(tcp) in the connection URL with C(https). - - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used - instead. If the environment variable is not set, the default value will be used. - type: str - default: unix://var/run/docker.sock - aliases: [ docker_url ] - project_name: - description: - - Provide a project name. - - Equivalent to C(docker-compose --project-name). - type: str - env_file: - description: - - By default environment files are loaded from a C(.env) file located directly under the I(project_src) directory. - - I(env_file) can be used to specify the path of a custom environment file instead. - - The path is relative to the I(project_src) directory. - - Equivalent to C(docker-compose --env-file). - type: path - files: - description: - - List of Compose files. - - Files are passed to docker-compose in the order given. - - Equivalent to C(docker-compose -f). - type: list - elements: path - profiles: - description: - - List of profiles to enable when starting services. - - Equivalent to C(docker-compose --profile). - type: list - elements: str - state: - description: - - Desired state of the project. - - Specifying C(pulled) is the same as running C(docker-compose pull). - - Specifying C(built) is the same as running C(docker-compose build). - - Specifying C(stopped) is the same as running C(docker-compose stop). - - Specifying C(present) is the same as running C(docker-compose up). - - Specifying C(restarted) is the same as running C(docker-compose restart). - - Specifying C(absent) is the same as running C(docker-compose down). - type: str - default: present - choices: - - absent - - built - - present - - pulled - - restarted - - stopped - services: - description: - - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)) - on a subset of services. 
- - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). - type: list - elements: str - dependencies: - description: - - When I(state) is C(present) specify whether or not to include linked services. - - When false, equivalent to C(docker-compose up --no-deps). - - When I(state) is C(pull) specify whether or not to also pull for linked services. - - When true, equivalent to C(docker-compose pull --include-deps). - type: bool - default: true - definition: - description: - - Compose file describing one or more services, networks and volumes. - - Mutually exclusive with I(files). - type: dict - recreate: - description: - - By default containers will be recreated when their configuration differs from the service definition. - - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. - - Setting to C(always) forces recreation of all existing containers. - - When set to C(never), equivalent to C(docker-compose up --no-recreate). - - When set to C(always), equivalent to C(docker-compose up --force-recreate). - type: str - default: smart - choices: - - always - - never - - smart - build: - description: - - Use with I(state) C(present) to always build images prior to starting the application. - - Equivalent to C(docker-compose up --build). - - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. - - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). - type: bool - default: false - pull: - description: - - Use with I(state) C(present) to always pull images prior to starting the application. - - Equivalent to C(docker-compose up --pull always). - - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). - type: bool - default: false - nocache: - description: - - Use with the I(build) option to ignore the cache during the image build process. - - Equivalent to C(docker-compose build --no-cache). - type: bool - default: false - remove_images: - description: - - Use with I(state) C(absent) to remove all images or only local images. - - Equivalent to C(docker-compose down --rmi all|local). - type: str - choices: - - 'all' - - 'local' - remove_volumes: - description: - - Use with I(state) C(absent) to remove data volumes. - - Equivalent to C(docker-compose down --volumes). - type: bool - default: false - remove_orphans: - description: - - Remove containers for services not defined in the Compose file. - - Equivalent to C(docker-compose up --remove-orphans) or C(docker-compose down --remove-orphans). - type: bool - default: false - timeout: - description: - - Timeout in seconds for container shutdown when attached or when containers are already running. - - By default C(compose) will use a C(10s) timeout unless C(default_grace_period) is defined for a - particular service in the I(project_src). - type: int - default: null - -requirements: - - "docker-compose >= 2.0.0" -''' - -EXAMPLES = ''' -# Examples use the django example at https://docs.docker.com/compose/django. 
Follow it to create the -# flask directory - -- name: Run using a project directory - hosts: localhost - gather_facts: false - tasks: - - name: Tear down existing services - community.docker.docker_compose: - project_src: flask - state: absent - - - name: Create and start services - community.docker.docker_compose: - project_src: flask - register: output - - - ansible.builtin.debug: - var: output - - - name: Run `docker-compose up` again - community.docker.docker_compose: - project_src: flask - build: false - register: output - - - ansible.builtin.debug: - var: output - - - ansible.builtin.assert: - that: not output.changed - - - name: Stop all services - community.docker.docker_compose: - project_src: flask - build: false - stopped: true - register: output - - - ansible.builtin.debug: - var: output - - - ansible.builtin.assert: - that: - - "'stopped' in containers['flask_web_1'] | default([])" - - "'stopped' in containers['flask_db_1'] | default([])" - - - name: Restart services - community.docker.docker_compose: - project_src: flask - build: false - restarted: true - register: output - - - ansible.builtin.debug: - var: output - - - ansible.builtin.assert: - that: - - "'started' in containers['flask_web_1'] | default([])" - - "'started' in containers['flask_db_1'] | default([])" - -- name: Run with inline Compose file version 2 - # https://docs.docker.com/compose/compose-file/compose-file-v2/ - hosts: localhost - gather_facts: false - tasks: - - community.docker.docker_compose: - project_src: flask - state: absent - - - community.docker.docker_compose: - project_name: flask - definition: - version: '2' - services: - db: - image: postgres - web: - build: "{{ playbook_dir }}/flask" - command: "python manage.py runserver 0.0.0.0:8000" - volumes: - - "{{ playbook_dir }}/flask:/code" - ports: - - "8000:8000" - depends_on: - - db - register: output - - - ansible.builtin.debug: - var: output - - - ansible.builtin.assert: - that: - - "'started' in containers['flask_web_1'] | default([])" - - "'started' in containers['flask_db_1'] | default([])" - -- name: Run with inline Compose file version 1 - # https://docs.docker.com/compose/compose-file/compose-file-v1/ - hosts: localhost - gather_facts: false - tasks: - - community.docker.docker_compose: - project_src: flask - state: absent - - - community.docker.docker_compose: - project_name: flask - definition: - db: - image: postgres - web: - build: "{{ playbook_dir }}/flask" - command: "python manage.py runserver 0.0.0.0:8000" - volumes: - - "{{ playbook_dir }}/flask:/code" - ports: - - "8000:8000" - links: - - db - register: output - - - ansible.builtin.debug: - var: output - - - ansible.builtin.assert: - that: - - "'started' in containers['flask_web_1'] | default([])" - - "'started' in containers['flask_db_1'] | default([])" -''' - -RETURN = ''' -stdout: - description: - - The stdout from docker-compose. - returned: always, unless when C(docker-compose) wasn't given the chance to run - type: str -stderr: - description: - - The stderr from docker-compose. - returned: always, unless when C(docker-compose) wasn't given the chance to run - type: str -containers: - description: - - A dictionary mapping the various status of containers during C(docker-compose) operation. - returned: always, unless when C(docker-compose) wasn't given the chance to run - type: complex: - contains: - container_name: - description: Name of the container. 
-      returned: always, unless C(docker-compose) was not given the chance to run
-      type: list
-      elements: str
-      example: ["stopped", "removed"]
-volumes:
-  description:
-    - A dictionary mapping the various statuses of volumes during the C(docker-compose) operation.
-  returned: always, unless C(docker-compose) was not given the chance to run
-  type: complex
-  contains:
-    volume_name:
-      description: Name of the volume.
-      returned: always, unless C(docker-compose) was not given the chance to run
-      type: list
-      elements: str
-      example: ["created"]
-images:
-  description:
-    - A dictionary mapping the various statuses of images during the C(docker-compose) operation.
-  returned: always, unless C(docker-compose) was not given the chance to run
-  type: complex
-  contains:
-    image_name:
-      description: Name of the image.
-      returned: always, unless C(docker-compose) was not given the chance to run
-      type: list
-      elements: str
-      example: ["removed"]
-networks:
-  description:
-    - A dictionary mapping the various statuses of networks during the C(docker-compose) operation.
-  returned: always, unless C(docker-compose) was not given the chance to run
-  type: complex
-  contains:
-    network_name:
-      description: Name of the network.
-      returned: always, unless C(docker-compose) was not given the chance to run
-      type: list
-      elements: str
-      example: ["created"]
-'''
-
-
-from typing import List, Optional, Tuple, Union, Literal, Final, FrozenSet
-import re
-from collections import defaultdict
-import enum
-from dataclasses import dataclass
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.docker.plugins.module_utils.util import (
-    DOCKER_COMMON_ARGS,
-)
-from ansible.module_utils.common.yaml import HAS_YAML, yaml_dump
-
-
-STATUS_DONE: Final[FrozenSet[str]] = frozenset({
-    'Started',
-    'Healthy',
-    'Exited',
-    'Restarted',
-    'Running',
-    'Created',
-    'Stopped',
-    'Killed',
-    'Removed',
-    # An extra, specific to containers
-    'Recreated',
-})
-
-
-STATUS_WORKING: Final[FrozenSet[str]] = frozenset({
-    'Creating',
-    'Starting',
-    'Waiting',
-    'Restarting',
-    'Stopping',
-    'Killing',
-    'Removing',
-    # An extra, specific to containers
-    'Recreate',
-})
-
-
-STATUS_ERROR: Final[FrozenSet[str]] = frozenset({
-    'Error',
-})
-
-
-STATUS_THAT_CAUSE_A_CHANGE = {
-    'Started',
-    'Exited',
-    'Restarted',
-    'Created',
-    'Stopped',
-    'Killed',
-    'Removed',
-}
-
-
-STATUS_DOCKERCOMPOSE_TO_COMMUNITYDOCKER = {
-    'Started': 'started',
-    'Healthy': 'healthy',
-    'Exited': 'exited',
-    'Restarted': 'restarted',
-    'Running': 'running',
-    'Created': 'created',
-    'Stopped': 'stopped',
-    'Killed': 'killed',
-    'Removed': 'removed',
-    'Recreated': 'recreated',
-}
-
-
-class ResourceType(enum.Enum):
-    NETWORK = enum.auto()
-    IMAGE = enum.auto()
-    VOLUME = enum.auto()
-    CONTAINER = enum.auto()
-
-    @classmethod
-    def from_docker_compose_event(cls, resource_type: str, /) -> "ResourceType":
-        return {
-            "Network": cls.NETWORK,
-            "Image": cls.IMAGE,
-            "Volume": cls.VOLUME,
-            "Container": cls.CONTAINER,
-        }[resource_type]
-
-
-@dataclass
-class ResourceEvent(object):
-    resource_type: ResourceType
-    resource_id: str
-    status: str
-
-
-_re_resource_event = re.compile(r'^(?P<resource_type>Network|Image|Volume|Container) (?P<resource_id>.+) (?P<status>{:s})'.format("|".join(STATUS_DONE | STATUS_WORKING | STATUS_ERROR)))
-
-
-DOCKER_COMPOSE_EXECUTABLE = 'docker-compose'
-
-
-class ComposeManager(object):
-    def __init__(self, module: AnsibleModule, docker_host: str, /) -> None:
-        self._docker_host = docker_host
-        self._module = module
-
@staticmethod - def _parse_stderr(stderr: str) -> List[ResourceEvent]: - events: List[ResourceEvent] = [] - for line in stderr.splitlines(): - line = line.rstrip() - if ((match := _re_resource_event.match(line)) is not None): - events.append(ResourceEvent( - resource_type=ResourceType.from_docker_compose_event(match.group('resource_type')), - resource_id=match.group('resource_id'), - status=match.group('status')) - ) - return events - - def _run_subcommand( - self, - subcommand: List[str], - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - ) -> Tuple[int, str, str, List[ResourceEvent]]: - command = [DOCKER_COMPOSE_EXECUTABLE, '--ansi', 'never'] - for file in files: - command.extend(['-f', file]) - if project_name is not None: - command.extend(['-p', project_name]) - if project_directory is not None: - command.extend(['--project-directory', project_directory]) - if env_file is not None: - command.extend(['--env-file', env_file]) - for profile in profiles: - command.extend(['--profile', profile]) - command += subcommand - kwargs = {} - if content is not None: - kwargs['data'] = content - env = { - 'DOCKER_HOST': self._docker_host - } - self._module.debug('DOCKER-COMPOSE command: {!r:s}'.format(command)) - self._module.debug('DOCKER-COMPOSE stdin: {!r:s}'.format(content)) - self._module.debug('DOCKER-COMPOSE env: {!r:s}'.format(env)) - rc, out, err = self._module.run_command( - command, - environ_update=env, - **kwargs, - ) - self._module.debug('DOCKER-COMPOSE rc: {:d}'.format(rc)) - self._module.debug('DOCKER-COMPOSE stdout: {!r:s}'.format(out)) - self._module.debug('DOCKER-COMPOSE stderr: {!r:s}'.format(err)) - events = self._parse_stderr(err) - return rc, out, err, events - - def up( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - services: List[str] = [], - no_deps: bool = False, - pull: Optional[Union[Literal['always'], Literal['missing'], Literal['never']]] = None, - build: bool = False, - force_recreate: bool = False, - no_recreate: bool = False, - remove_orphans: bool = False, - timeout: Optional[int] = None, - ) -> Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['up', '-d'] - if no_deps: - subcommand.append('--no-deps') - if pull: - subcommand.extend(['--pull', pull]) - if build: - subcommand.append('--build') - if force_recreate: - subcommand.append('--force-recreate') - if no_recreate: - subcommand.append('--no-recreate') - if remove_orphans: - subcommand.append('--remove-orphans') - if timeout is not None: - subcommand.extend(['--timeout', '{:d}'.format(timeout)]) - for service in services: - subcommand.append(service) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - def down( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - remove_orphans: bool = False, - rmi: Optional[Union[Literal['all'], Literal['local']]] = None, - volumes: bool = False, - timeout: Optional[int] = None, - ) -> 
Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['down'] - if remove_orphans: - subcommand.append('--remove-orphans') - if rmi: - subcommand.extend(['--rmi', rmi]) - if volumes: - subcommand.extend(['--volumes']) - if timeout is not None: - subcommand.extend(['--timeout', '{:d}'.format(timeout)]) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - def stop( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - timeout: Optional[int] = None, - ) -> Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['stop'] - if timeout is not None: - subcommand.extend(['--timeout', '{:d}'.format(timeout)]) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - def restart( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - services: List[str] = [], - timeout: Optional[int] = None, - ) -> Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['restart'] - if timeout is not None: - subcommand.extend(['--timeout', '{:d}'.format(timeout)]) - for service in services: - subcommand.append(service) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - def build( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - services: List[str] = [], - no_cache: bool = False, - pull: bool = False, - ) -> Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['build'] - if no_cache: - subcommand.append('--no-cache') - if pull: - subcommand.append('--pull') - for service in services: - subcommand.append(service) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - def pull( - self, - # Common arguments - files: List[str], - /, - content: Optional[str] = None, - *, - project_name: Optional[str] = None, - project_directory: Optional[str] = None, - profiles: List[str] = [], - env_file: Optional[str], - # Specific arguments - services: List[str] = [], - include_deps: bool = False, - ) -> Tuple[int, str, str, List[ResourceEvent]]: - subcommand = ['pull'] - if include_deps: - subcommand.append('--include-deps') - for service in services: - subcommand.append(service) - return self._run_subcommand( - subcommand, - files, - content, - project_name=project_name, - project_directory=project_directory, - profiles=profiles, - env_file=env_file, - ) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - docker_host=DOCKER_COMMON_ARGS['docker_host'], - project_src=dict(type='path'), - project_name=dict(type='str',), - env_file=dict(type='path'), - files=dict(type='list', elements='path'), - 
-            profiles=dict(type='list', elements='str'),
-            state=dict(type='str', default='present', choices=['absent', 'present', 'built', 'pulled', 'restarted', 'stopped']),
-            definition=dict(type='dict'),
-            recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
-            build=dict(type='bool', default=False),
-            remove_images=dict(type='str', choices=['all', 'local']),
-            remove_volumes=dict(type='bool', default=False),
-            remove_orphans=dict(type='bool', default=False),
-            stopped=dict(type='bool', default=False),
-            restarted=dict(type='bool', default=False),
-            services=dict(type='list', elements='str'),
-            dependencies=dict(type='bool', default=True),
-            pull=dict(type='bool', default=False),
-            nocache=dict(type='bool', default=False),
-            timeout=dict(type='int')
-        ),
-        mutually_exclusive=[
-            ('definition', 'files'),
-        ],
-        required_by={
-            'definition': ('project_src', ),
-        },
-        required_one_of=[
-            ('files', 'definition'),
-        ],
-    )
-    if bool(module.params['stopped']) and bool(module.params['restarted']):
-        module.fail_json(changed=False, msg='Cannot use restarted and stopped at the same time.')
-    changed = False
-    compose = ComposeManager(module, module.params['docker_host'])
-    if module.params['definition'] is not None:
-        if not HAS_YAML:
-            module.fail_json(changed=False, msg='PyYAML is required when providing an inline definition')
-        common_args = [
-            ['-'],
-            yaml_dump(module.params['definition']),
-        ]
-    else:
-        common_args = [
-            module.params['files'],
-        ]
-    common_kwargs = dict(
-        project_name=module.params['project_name'],
-        project_directory=module.params['project_src'],
-        profiles=module.params['profiles'] or [],
-        env_file=module.params['env_file'],
-    )
-    if module.params['state'] == 'present':
-        rc, out, err, events = compose.up(
-            *common_args,
-            **common_kwargs,
-            services=module.params['services'] or [],
-            no_deps=not module.params['dependencies'],
-            pull='always' if module.params['pull'] else None,
-            build=module.params['build'],
-            force_recreate=module.params['recreate'] == "always",
-            no_recreate=module.params['recreate'] == "never",
-            remove_orphans=module.params['remove_orphans'],
-            timeout=module.params['timeout'],
-        )
-    elif module.params['state'] == 'stopped':
-        rc, out, err, events = compose.stop(
-            *common_args,
-            **common_kwargs,
-            timeout=module.params['timeout'],
-        )
-    elif module.params['state'] == 'restarted':
-        rc, out, err, events = compose.restart(
-            *common_args,
-            **common_kwargs,
-            services=module.params['services'] or [],
-            timeout=module.params['timeout'],
-        )
-    elif module.params['state'] == 'built':
-        rc, out, err, events = compose.build(
-            *common_args,
-            **common_kwargs,
-            services=module.params['services'] or [],
-            no_cache=module.params['nocache'],
-            pull=module.params['pull'],
-        )
-    elif module.params['state'] == 'pulled':
-        rc, out, err, events = compose.pull(
-            *common_args,
-            **common_kwargs,
-            services=module.params['services'] or [],
-            include_deps=module.params['dependencies'],
-        )
-        changed = True  # We cannot detect change from docker-compose stderr
-    elif module.params['state'] == 'absent':
-        rc, out, err, events = compose.down(
-            *common_args,
-            **common_kwargs,
-            remove_orphans=module.params['remove_orphans'],
-            rmi=module.params['remove_images'],
-            volumes=module.params['remove_volumes'],
-            timeout=module.params['timeout'],
-        )
-    else:
-        assert False  # DEAD CODE
-    networks_states = defaultdict(list)
-    images_states = defaultdict(list)
-    volumes_states = defaultdict(list)
-    containers_states = defaultdict(list)
-    for event in events:
-        collection = {
-            ResourceType.NETWORK: 
networks_states, - ResourceType.IMAGE: images_states, - ResourceType.VOLUME: volumes_states, - ResourceType.CONTAINER: containers_states, - }[event.resource_type] - if event.status not in STATUS_DONE: - continue - if event.status in STATUS_THAT_CAUSE_A_CHANGE: - changed = True - collection[event.resource_id].append(STATUS_DOCKERCOMPOSE_TO_COMMUNITYDOCKER[event.status]) - result = dict( - changed=changed, - networks={k: list(v) for k, v in networks_states.items()}, - images={k: list(v) for k, v in images_states.items()}, - volumes={k: list(v) for k, v in volumes_states.items()}, - containers={k: list(v) for k, v in containers_states.items()}, - stdout=out, - stderr=err, - ) - if rc != 0: - result['msg'] = "docker-compose exited with code {:d}. Read stderr for more information.".format(rc) - module.fail_json(**result) - else: - module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/docker_compose_v2.py b/plugins/modules/docker_compose_v2.py index 14ea83310..4340426d6 100644 --- a/plugins/modules/docker_compose_v2.py +++ b/plugins/modules/docker_compose_v2.py @@ -1,87 +1,81 @@ #!/usr/bin/python -# -# Copyright 2016 Red Hat | Ansible -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- DOCUMENTATION = ''' -module: docker_compose +module: docker_compose2 short_description: Manage multi-container Docker applications with Docker Compose. -author: "Chris Houseknecht (@chouseknecht)" +author: Léo El Amri (@lel-amri) description: - - Uses Docker Compose to start, shutdown and scale services. B(This module requires docker-compose < 2.0.0.) - - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option. + - Uses Docker Compose to start and shutdown services. + - Swarm mode is not supported (thus secrets and configs aren't supported). + - Configuration can be read from a Compose file or inline using the I(definition) option. - See the examples for more details. - - Supports check mode. - - This module was called C(docker_service) before Ansible 2.8. The usage did not change. - -extends_documentation_fragment: - - community.docker.docker - - community.docker.docker.docker_py_1_documentation - - community.docker.attributes - - community.docker.attributes.actiongroup_docker attributes: check_mode: - support: full + support: none diff_mode: support: none options: - project_src: + docker_host: description: - - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file. - - Mutually exclusive with I(definition). - - Required when no I(definition) is provided. - type: path + - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the + TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + the module will automatically replace C(tcp) in the connection URL with C(https). + - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used + instead. If the environment variable is not set, the default value will be used. + type: str + default: unix://var/run/docker.sock + aliases: [ docker_url ] project_name: description: - - Provide a project name. 
If not provided, the project name is taken from the basename of I(project_src). - - Required when I(definition) is provided. + - Provide a project name. + - Equivalent to C(docker-compose --project-name). type: str env_file: description: - By default environment files are loaded from a C(.env) file located directly under the I(project_src) directory. - I(env_file) can be used to specify the path of a custom environment file instead. - The path is relative to the I(project_src) directory. - - Requires C(docker-compose) version 1.25.0 or greater. - - "Note: C(docker-compose) versions C(<=1.28) load the env file from the current working directory of the - C(docker-compose) command rather than I(project_src)." + - Equivalent to C(docker-compose --env-file). type: path - version_added: 1.9.0 files: description: - - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). - - Files are loaded and merged in the order given. + - List of Compose files. + - Files are passed to docker-compose in the order given. + - Equivalent to C(docker-compose -f). type: list elements: path profiles: description: - List of profiles to enable when starting services. - Equivalent to C(docker-compose --profile). - - Requires C(docker-compose) version 1.28.0 or greater. type: list elements: str - version_added: 1.8.0 state: description: - Desired state of the project. - - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) - (with I(restarted)). + - Specifying C(pulled) is the same as running C(docker-compose pull). + - Specifying C(built) is the same as running C(docker-compose build). + - Specifying C(stopped) is the same as running C(docker-compose stop). + - Specifying C(present) is the same as running C(docker-compose up). + - Specifying C(restarted) is the same as running C(docker-compose restart). - Specifying C(absent) is the same as running C(docker-compose down). type: str default: present choices: - absent + - built - present + - pulled + - restarted + - stopped services: description: - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)) @@ -89,31 +83,26 @@ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). type: list elements: str - scale: - description: - - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key - is the name of the service and the value is an integer count for the number of containers. - type: dict dependencies: description: - When I(state) is C(present) specify whether or not to include linked services. + - When false, equivalent to C(docker-compose up --no-deps). + - When I(state) is C(pull) specify whether or not to also pull for linked services. + - When true, equivalent to C(docker-compose pull --include-deps). type: bool default: true definition: description: - Compose file describing one or more services, networks and volumes. - - Mutually exclusive with I(project_src) and I(files). + - Mutually exclusive with I(files). type: dict - hostname_check: - description: - - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate. 
- type: bool - default: false recreate: description: - By default containers will be recreated when their configuration differs from the service definition. - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. - Setting to C(always) forces recreation of all existing containers. + - When set to C(never), equivalent to C(docker-compose up --no-recreate). + - When set to C(always), equivalent to C(docker-compose up --force-recreate). type: str default: smart choices: @@ -123,27 +112,28 @@ build: description: - Use with I(state) C(present) to always build images prior to starting the application. - - Same as running C(docker-compose build) with the pull option. + - Equivalent to C(docker-compose up --build). - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. - - Use the I(nocache) option to ignore the image cache when performing the build. - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). type: bool default: false pull: description: - Use with I(state) C(present) to always pull images prior to starting the application. - - Same as running C(docker-compose pull). + - Equivalent to C(docker-compose up --pull always). - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). type: bool default: false nocache: description: - Use with the I(build) option to ignore the cache during the image build process. + - Equivalent to C(docker-compose build --no-cache). type: bool default: false remove_images: description: - Use with I(state) C(absent) to remove all images or only local images. + - Equivalent to C(docker-compose down --rmi all|local). type: str choices: - 'all' @@ -151,25 +141,13 @@ remove_volumes: description: - Use with I(state) C(absent) to remove data volumes. - type: bool - default: false - stopped: - description: - - Use with I(state) C(present) to stop all containers defined in the Compose file. - - If I(services) is defined, only the containers listed there will be stopped. - - Requires C(docker-compose) version 1.17.0 or greater for full support. For older versions, the services will - first be started and then stopped when the service is supposed to be created as stopped. - type: bool - default: false - restarted: - description: - - Use with I(state) C(present) to restart all containers defined in the Compose file. - - If I(services) is defined, only the containers listed there will be restarted. + - Equivalent to C(docker-compose down --volumes). type: bool default: false remove_orphans: description: - Remove containers for services not defined in the Compose file. + - Equivalent to C(docker-compose up --remove-orphans) or C(docker-compose down --remove-orphans). type: bool default: false timeout: @@ -179,15 +157,9 @@ particular service in the I(project_src). type: int default: null - use_ssh_client: - description: - - Currently ignored for this module, but might suddenly be supported later on. 
requirements: - - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" - - "docker-compose >= 1.7.0, < 2.0.0" - - "Docker API >= 1.25" - - "PyYAML >= 3.11" + - "docker-compose >= 2.0.0" ''' EXAMPLES = ''' @@ -235,8 +207,8 @@ - ansible.builtin.assert: that: - - "not output.services.web.flask_web_1.state.running" - - "not output.services.db.flask_db_1.state.running" + - "'stopped' in containers['flask_web_1'] | default([])" + - "'stopped' in containers['flask_db_1'] | default([])" - name: Restart services community.docker.docker_compose: @@ -250,21 +222,8 @@ - ansible.builtin.assert: that: - - "output.services.web.flask_web_1.state.running" - - "output.services.db.flask_db_1.state.running" - -- name: Scale the web service to 2 - hosts: localhost - gather_facts: false - tasks: - - community.docker.docker_compose: - project_src: flask - scale: - web: 2 - register: output - - - ansible.builtin.debug: - var: output + - "'started' in containers['flask_web_1'] | default([])" + - "'started' in containers['flask_db_1'] | default([])" - name: Run with inline Compose file version 2 # https://docs.docker.com/compose/compose-file/compose-file-v2/ @@ -298,8 +257,8 @@ - ansible.builtin.assert: that: - - "output.services.web.flask_web_1.state.running" - - "output.services.db.flask_db_1.state.running" + - "'started' in containers['flask_web_1'] | default([])" + - "'started' in containers['flask_db_1'] | default([])" - name: Run with inline Compose file version 1 # https://docs.docker.com/compose/compose-file/compose-file-v1/ @@ -331,889 +290,576 @@ - ansible.builtin.assert: that: - - "output.services.web.flask_web_1.state.running" - - "output.services.db.flask_db_1.state.running" + - "'started' in containers['flask_web_1'] | default([])" + - "'started' in containers['flask_db_1'] | default([])" ''' RETURN = ''' -services: +stdout: description: - - A dictionary mapping the service's name to a dictionary of containers. - returned: success - type: complex + - The stdout from docker-compose. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: str +stderr: + description: + - The stderr from docker-compose. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: str +containers: + description: + - A dictionary mapping the various status of containers during C(docker-compose) operation. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: complex: + contains: + container_name: + description: Name of the container. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: list + elements: str + example: ["stopped", "removed"] +volumes: + description: + - A dictionary mapping the various status of volumes during C(docker-compose) operation. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: complex: contains: - container_name: - description: Name of the container. Format is C(project_service_#). - returned: success - type: complex - contains: - cmd: - description: One or more commands to be executed in the container. - returned: success - type: list - elements: str - example: ["postgres"] - image: - description: Name of the image from which the container was built. - returned: success - type: str - example: postgres - labels: - description: Meta data assigned to the container. 
- returned: success - type: dict - example: {...} - networks: - description: Contains a dictionary for each network to which the container is a member. - returned: success - type: list - elements: dict - contains: - IPAddress: - description: The IP address assigned to the container. - returned: success - type: str - example: 172.17.0.2 - IPPrefixLen: - description: Number of bits used by the subnet. - returned: success - type: int - example: 16 - aliases: - description: Aliases assigned to the container by the network. - returned: success - type: list - elements: str - example: ['db'] - globalIPv6: - description: IPv6 address assigned to the container. - returned: success - type: str - example: '' - globalIPv6PrefixLen: - description: IPv6 subnet length. - returned: success - type: int - example: 0 - links: - description: List of container names to which this container is linked. - returned: success - type: list - elements: str - example: null - macAddress: - description: Mac Address assigned to the virtual NIC. - returned: success - type: str - example: "02:42:ac:11:00:02" - state: - description: Information regarding the current disposition of the container. - returned: success - type: dict - contains: - running: - description: Whether or not the container is up with a running process. - returned: success - type: bool - example: true - status: - description: Description of the running state. - returned: success - type: str - example: running - -actions: - description: Provides the actions to be taken on each service as determined by compose. - returned: when in check mode or I(debug) is C(true) - type: complex + volume_name: + description: Name of the volume. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: list + elements: str + example: ["created"] +images: + description: + - A dictionary mapping the various status of volumes during C(docker-compose) operation. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: complex: + contains: + image_name: + description: Name of the image. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: list + elements: str + example: ["removed"] +networks: + description: + - A dictionary mapping the various status of networks during C(docker-compose) operation. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: complex: contains: - service_name: - description: Name of the service. - returned: always - type: complex - contains: - pulled_image: - description: Provides image details when a new image is pulled for the service. - returned: on image pull - type: complex - contains: - name: - description: name of the image - returned: always - type: str - id: - description: image hash - returned: always - type: str - built_image: - description: Provides image details when a new image is built for the service. - returned: on image build - type: complex - contains: - name: - description: name of the image - returned: always - type: str - id: - description: image hash - returned: always - type: str - - action: - description: A descriptive name of the action to be performed on the service's containers. 
- returned: always - type: list - elements: str - contains: - id: - description: the container's long ID - returned: always - type: str - name: - description: the container's name - returned: always - type: str - short_id: - description: the container's short ID - returned: always - type: str + image_name: + description: Name of the image. + returned: always, unless when C(docker-compose) wasn't given the chance to run + type: list + elements: str + example: ["created"] ''' -import os -import re -import sys -import tempfile -import traceback -from contextlib import contextmanager - -from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion - -try: - import yaml - HAS_YAML = True - HAS_YAML_EXC = None -except ImportError as dummy: - HAS_YAML = False - HAS_YAML_EXC = traceback.format_exc() - -try: - from docker.errors import DockerException -except ImportError: - # missing Docker SDK for Python handled in ansible.module_utils.docker.common - pass - -try: - from compose import __version__ as compose_version - from compose.cli.command import project_from_options - from compose.service import NoSuchImageError - from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt - from compose.const import LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF - HAS_COMPOSE = True - HAS_COMPOSE_EXC = None - MINIMUM_COMPOSE_VERSION = '1.7.0' -except ImportError as dummy: - HAS_COMPOSE = False - HAS_COMPOSE_EXC = traceback.format_exc() - -from ansible.module_utils.common.text.converters import to_native - -from ansible_collections.community.docker.plugins.module_utils.common import ( - AnsibleDockerClient, - RequestException, -) +from typing import List, Optional, Tuple, Union, Literal, Final, FrozenSet +import re +from collections import defaultdict +import enum +from dataclasses import dataclass +from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.docker.plugins.module_utils.util import ( - DockerBaseClass, + DOCKER_COMMON_ARGS, ) - - -AUTH_PARAM_MAPPING = { - u'docker_host': u'--host', - u'tls': u'--tls', - u'cacert_path': u'--tlscacert', - u'cert_path': u'--tlscert', - u'key_path': u'--tlskey', - u'tls_verify': u'--tlsverify' +from ansible.module_utils.common.yaml import HAS_YAML, yaml_dump + + +STATUS_DONE: Final[FrozenSet[str]] = frozenset({ + 'Started', + 'Healthy', + 'Exited', + 'Restarted', + 'Running', + 'Created', + 'Stopped', + 'Killed', + 'Removed', + # An extra, specific to containers + 'Recreated', +}) + + +STATUS_WORKING: Final[FrozenSet[str]] = frozenset({ + 'Creating', + 'Starting', + 'Waiting', + 'Restarting', + 'Stopping', + 'Killing', + 'Removing', + # An extra, specific to containers + 'Recreate', +}) + + +STATUS_ERROR: Final[FrozenSet[str]] = frozenset({ + 'Error', +}) + + +STATUS_THAT_CAUSE_A_CHANGE = { + 'Started', + 'Exited', + 'Restarted', + 'Created', + 'Stopped', + 'Killed', + 'Removed', } -@contextmanager -def stdout_redirector(path_name): - old_stdout = sys.stdout - fd = open(path_name, 'w') - sys.stdout = fd - try: - yield - finally: - sys.stdout = old_stdout - - -@contextmanager -def stderr_redirector(path_name): - old_fh = sys.stderr - fd = open(path_name, 'w') - sys.stderr = fd - try: - yield - finally: - sys.stderr = old_fh - - -def make_redirection_tempfiles(): - dummy, out_redir_name = tempfile.mkstemp(prefix="ansible") - dummy, err_redir_name = tempfile.mkstemp(prefix="ansible") - return (out_redir_name, err_redir_name) - +STATUS_DOCKERCOMPOSE_TO_COMMUNITYDOCKER = { 
+ 'Started': 'started', + 'Healthy': 'healthy', + 'Exited': 'exited', + 'Restarted': 'restarted', + 'Running': 'running', + 'Created': 'created', + 'Stopped': 'stopped', + 'Killed': 'killed', + 'Removed': 'removed', + 'Recreated': 'recreated', +} -def cleanup_redirection_tempfiles(out_name, err_name): - for i in [out_name, err_name]: - os.remove(i) +class ResourceType(enum.Enum): + NETWORK = enum.auto() + IMAGE = enum.auto() + VOLUME = enum.auto() + CONTAINER = enum.auto() -def get_redirected_output(path_name): - output = [] - with open(path_name, 'r') as fd: - for line in fd: - # strip terminal format/color chars - new_line = re.sub(r'\x1b\[.+m', '', line) - output.append(new_line) - os.remove(path_name) - return output + @classmethod + def from_docker_compose_event(cls, resource_type: str, /) -> "ResourceType": + return { + "Network": cls.NETWORK, + "Image": cls.IMAGE, + "Volume": cls.VOLUME, + "Container": cls.CONTAINER, + }[resource_type] -def attempt_extract_errors(exc_str, stdout, stderr): - errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')] - errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')]) +@dataclass +class ResourceEvent(object): + resource_type: ResourceType + resource_id: str + status: str - warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')] - warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')]) - # assume either the exception body (if present) or the last warning was the 'most' - # fatal. +_re_resource_event = re.compile(r'^(?PNetwork|Image|Volume|Container) (?P.+) (?P{:s})'.format("|".join(STATUS_DONE | STATUS_WORKING | STATUS_ERROR))) - if exc_str.strip(): - msg = exc_str.strip() - elif errors: - msg = errors[-1].encode('utf-8') - else: - msg = 'unknown cause' - return { - 'warnings': [to_native(w) for w in warnings], - 'errors': [to_native(e) for e in errors], - 'msg': msg, - 'module_stderr': ''.join(stderr), - 'module_stdout': ''.join(stdout) - } +DOCKER_COMPOSE_EXECUTABLE = 'docker-compose' -def get_failure_info(exc, out_name, err_name=None, msg_format='%s'): - if err_name is None: - stderr = [] - else: - stderr = get_redirected_output(err_name) - stdout = get_redirected_output(out_name) - - reason = attempt_extract_errors(str(exc), stdout, stderr) - reason['msg'] = msg_format % reason['msg'] - return reason - - -class ContainerManager(DockerBaseClass): - - def __init__(self, client): - - super(ContainerManager, self).__init__() - - self.client = client - self.project_src = None - self.files = None - self.project_name = None - self.state = None - self.definition = None - self.hostname_check = None - self.timeout = None - self.remove_images = None - self.remove_orphans = None - self.remove_volumes = None - self.stopped = None - self.restarted = None - self.recreate = None - self.build = None - self.dependencies = None - self.services = None - self.scale = None - self.debug = None - self.pull = None - self.nocache = None - - for key, value in client.module.params.items(): - setattr(self, key, value) - - self.check_mode = client.check_mode - - if not self.debug: - self.debug = client.module._debug - - self.options = dict() - self.options.update(self._get_auth_options()) - self.options[u'--skip-hostname-check'] = (not self.hostname_check) - - if self.project_name: - self.options[u'--project-name'] = self.project_name - - if self.env_file: - self.options[u'--env-file'] = self.env_file - - if self.files: - self.options[u'--file'] = self.files - - if self.profiles: - 
self.options[u'--profile'] = self.profiles - - if not HAS_COMPOSE: - self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" % - to_native(HAS_COMPOSE_EXC)) - - if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION): - self.client.fail("Found docker-compose version %s. Minimum required version is %s. " - "Upgrade docker-compose to a min version of %s." % - (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION)) - - if self.restarted and self.stopped: - self.client.fail("Cannot use restarted and stopped at the same time.") - - self.log("options: ") - self.log(self.options, pretty_print=True) - - if self.definition: - if not HAS_YAML: - self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % to_native(HAS_YAML_EXC)) - - if not self.project_name: - self.client.fail("Parameter error - project_name required when providing definition.") - - self.project_src = tempfile.mkdtemp(prefix="ansible") - compose_file = os.path.join(self.project_src, "docker-compose.yml") - try: - self.log('writing: ') - self.log(yaml.dump(self.definition, default_flow_style=False)) - with open(compose_file, 'w') as f: - f.write(yaml.dump(self.definition, default_flow_style=False)) - except Exception as exc: - self.client.fail("Error writing to %s - %s" % (compose_file, to_native(exc))) - else: - if not self.project_src: - self.client.fail("Parameter error - project_src required.") - - try: - self.log("project_src: %s" % self.project_src) - self.project = project_from_options(self.project_src, self.options) - except Exception as exc: - self.client.fail("Configuration error - %s" % to_native(exc)) - - def exec_module(self): - result = dict() - - if self.state == 'present': - result = self.cmd_up() - elif self.state == 'absent': - result = self.cmd_down() - - if self.definition: - compose_file = os.path.join(self.project_src, "docker-compose.yml") - self.log("removing %s" % compose_file) - os.remove(compose_file) - self.log("removing %s" % self.project_src) - os.rmdir(self.project_src) - - if not self.check_mode and not self.debug and result.get('actions'): - result.pop('actions') - - return result - - def _get_auth_options(self): - options = dict() - for key, value in self.client.auth_params.items(): - if value is not None: - option = AUTH_PARAM_MAPPING.get(key) - if option: - options[option] = value - return options - - def cmd_up(self): - - start_deps = self.dependencies - service_names = self.services - detached = True - result = dict(changed=False, actions=[], services=dict()) - - up_options = { - u'--no-recreate': False, - u'--build': False, - u'--no-build': False, - u'--no-deps': False, - u'--force-recreate': False, - } +class ComposeManager(object): + def __init__(self, module: AnsibleModule, docker_host: str, /) -> None: + self._docker_host = docker_host + self._module = module - if self.recreate == 'never': - up_options[u'--no-recreate'] = True - elif self.recreate == 'always': - up_options[u'--force-recreate'] = True - - if self.remove_orphans: - up_options[u'--remove-orphans'] = True - - converge = convergence_strategy_from_opts(up_options) - self.log("convergence strategy: %s" % converge) - - if self.pull: - pull_output = self.cmd_pull() - result['changed'] |= pull_output['changed'] - result['actions'] += pull_output['actions'] - - if self.build: - build_output = self.cmd_build() - result['changed'] |= build_output['changed'] - result['actions'] += build_output['actions'] - - if self.remove_orphans: - containers = 
self.client.containers( - filters={ - 'label': [ - '{0}={1}'.format(LABEL_PROJECT, self.project.name), - '{0}={1}'.format(LABEL_ONE_OFF, "False") - ], - } - ) - - orphans = [] - for container in containers: - service_name = container.get('Labels', {}).get(LABEL_SERVICE) - if service_name not in self.project.service_names: - orphans.append(service_name) - - if orphans: - result['changed'] = True - - for service in self.project.services: - if not service_names or service.name in service_names: - plan = service.convergence_plan(strategy=converge) - if plan.action == 'start' and self.stopped: - # In case the only action is starting, and the user requested - # that the service should be stopped, ignore this service. - continue - if not self._service_profile_enabled(service): - continue - if plan.action != 'noop': - result['changed'] = True - result_action = dict(service=service.name) - result_action[plan.action] = [] - for container in plan.containers: - result_action[plan.action].append(dict( - id=container.id, - name=container.name, - short_id=container.short_id, - )) - result['actions'].append(result_action) - - if not self.check_mode and result['changed']: - out_redir_name, err_redir_name = make_redirection_tempfiles() - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - do_build = build_action_from_opts(up_options) - self.log('Setting do_build to %s' % do_build) - up_kwargs = { - 'service_names': service_names, - 'start_deps': start_deps, - 'strategy': converge, - 'do_build': do_build, - 'detached': detached, - 'remove_orphans': self.remove_orphans, - 'timeout': self.timeout, - } - - if LooseVersion(compose_version) >= LooseVersion('1.17.0'): - up_kwargs['start'] = not self.stopped - elif self.stopped: - self.client.module.warn( - "The 'stopped' option requires docker-compose version >= 1.17.0. " + - "This task was run with docker-compose version %s." 
% compose_version - ) - - self.project.up(**up_kwargs) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error starting project %s") - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - - if self.stopped: - stop_output = self.cmd_stop(service_names) - result['changed'] |= stop_output['changed'] - result['actions'] += stop_output['actions'] - - if self.restarted: - restart_output = self.cmd_restart(service_names) - result['changed'] |= restart_output['changed'] - result['actions'] += restart_output['actions'] - - if self.scale: - scale_output = self.cmd_scale() - result['changed'] |= scale_output['changed'] - result['actions'] += scale_output['actions'] - - for service in self.project.services: - service_facts = dict() - result['services'][service.name] = service_facts - for container in service.containers(stopped=True): - inspection = container.inspect() - # pare down the inspection data to the most useful bits - facts = dict( - cmd=[], - labels=dict(), - image=None, - state=dict( - running=None, - status=None - ), - networks=dict() + @staticmethod + def _parse_stderr(stderr: str) -> List[ResourceEvent]: + events: List[ResourceEvent] = [] + for line in stderr.splitlines(): + line = line.rstrip() + if ((match := _re_resource_event.match(line)) is not None): + events.append(ResourceEvent( + resource_type=ResourceType.from_docker_compose_event(match.group('resource_type')), + resource_id=match.group('resource_id'), + status=match.group('status')) ) - if inspection['Config'].get('Cmd', None) is not None: - facts['cmd'] = inspection['Config']['Cmd'] - if inspection['Config'].get('Labels', None) is not None: - facts['labels'] = inspection['Config']['Labels'] - if inspection['Config'].get('Image', None) is not None: - facts['image'] = inspection['Config']['Image'] - if inspection['State'].get('Running', None) is not None: - facts['state']['running'] = inspection['State']['Running'] - if inspection['State'].get('Status', None) is not None: - facts['state']['status'] = inspection['State']['Status'] - - if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'): - networks = inspection['NetworkSettings']['Networks'] - for key in networks: - facts['networks'][key] = dict( - aliases=[], - globalIPv6=None, - globalIPv6PrefixLen=0, - IPAddress=None, - IPPrefixLen=0, - links=None, - macAddress=None, - ) - if networks[key].get('Aliases', None) is not None: - facts['networks'][key]['aliases'] = networks[key]['Aliases'] - if networks[key].get('GlobalIPv6Address', None) is not None: - facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address'] - if networks[key].get('GlobalIPv6PrefixLen', None) is not None: - facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen'] - if networks[key].get('IPAddress', None) is not None: - facts['networks'][key]['IPAddress'] = networks[key]['IPAddress'] - if networks[key].get('IPPrefixLen', None) is not None: - facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen'] - if networks[key].get('Links', None) is not None: - facts['networks'][key]['links'] = networks[key]['Links'] - if networks[key].get('MacAddress', None) is not None: - facts['networks'][key]['macAddress'] = networks[key]['MacAddress'] - - service_facts[container.name] = facts - - return result - - def cmd_pull(self): - result = dict( - changed=False, - actions=[], + return events + + def _run_subcommand( + self, + 
subcommand: List[str], + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + ) -> Tuple[int, str, str, List[ResourceEvent]]: + command = [DOCKER_COMPOSE_EXECUTABLE, '--ansi', 'never'] + for file in files: + command.extend(['-f', file]) + if project_name is not None: + command.extend(['-p', project_name]) + if project_directory is not None: + command.extend(['--project-directory', project_directory]) + if env_file is not None: + command.extend(['--env-file', env_file]) + for profile in profiles: + command.extend(['--profile', profile]) + command += subcommand + kwargs = {} + if content is not None: + kwargs['data'] = content + env = { + 'DOCKER_HOST': self._docker_host + } + self._module.debug('DOCKER-COMPOSE command: {!r:s}'.format(command)) + self._module.debug('DOCKER-COMPOSE stdin: {!r:s}'.format(content)) + self._module.debug('DOCKER-COMPOSE env: {!r:s}'.format(env)) + rc, out, err = self._module.run_command( + command, + environ_update=env, + **kwargs, + ) + self._module.debug('DOCKER-COMPOSE rc: {:d}'.format(rc)) + self._module.debug('DOCKER-COMPOSE stdout: {!r:s}'.format(out)) + self._module.debug('DOCKER-COMPOSE stderr: {!r:s}'.format(err)) + events = self._parse_stderr(err) + return rc, out, err, events + + def up( + self, + # Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + services: List[str] = [], + no_deps: bool = False, + pull: Optional[Union[Literal['always'], Literal['missing'], Literal['never']]] = None, + build: bool = False, + force_recreate: bool = False, + no_recreate: bool = False, + remove_orphans: bool = False, + timeout: Optional[int] = None, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['up', '-d'] + if no_deps: + subcommand.append('--no-deps') + if pull: + subcommand.extend(['--pull', pull]) + if build: + subcommand.append('--build') + if force_recreate: + subcommand.append('--force-recreate') + if no_recreate: + subcommand.append('--no-recreate') + if remove_orphans: + subcommand.append('--remove-orphans') + if timeout is not None: + subcommand.extend(['--timeout', '{:d}'.format(timeout)]) + for service in services: + subcommand.append(service) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - if not self.check_mode: - for service in self.project.get_services(self.services, include_deps=False): - if 'image' not in service.options: - continue - - self.log('Pulling image for service %s' % service.name) - # store the existing image ID - old_image_id = '' - try: - image = service.image() - if image and image.get('Id'): - old_image_id = image['Id'] - except NoSuchImageError: - pass - except Exception as exc: - self.client.fail("Error: service image lookup failed - %s" % to_native(exc)) - - out_redir_name, err_redir_name = make_redirection_tempfiles() - # pull the image - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - service.pull(ignore_pull_failures=False) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error: pull failed with %s") - self.client.fail(**fail_reason) - else: - 
cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - - # store the new image ID - new_image_id = '' - try: - image = service.image() - if image and image.get('Id'): - new_image_id = image['Id'] - except NoSuchImageError as exc: - self.client.fail("Error: service image lookup failed after pull - %s" % to_native(exc)) - - if new_image_id != old_image_id: - # if a new image was pulled - result['changed'] = True - result['actions'].append(dict( - service=service.name, - pulled_image=dict( - name=service.image_name, - id=new_image_id - ) - )) - return result - - def cmd_build(self): - result = dict( - changed=False, - actions=[] + def down( + self, + # Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + remove_orphans: bool = False, + rmi: Optional[Union[Literal['all'], Literal['local']]] = None, + volumes: bool = False, + timeout: Optional[int] = None, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['down'] + if remove_orphans: + subcommand.append('--remove-orphans') + if rmi: + subcommand.extend(['--rmi', rmi]) + if volumes: + subcommand.extend(['--volumes']) + if timeout is not None: + subcommand.extend(['--timeout', '{:d}'.format(timeout)]) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - if not self.check_mode: - for service in self.project.get_services(self.services, include_deps=False): - if service.can_be_built(): - self.log('Building image for service %s' % service.name) - # store the existing image ID - old_image_id = '' - try: - image = service.image() - if image and image.get('Id'): - old_image_id = image['Id'] - except NoSuchImageError: - pass - except Exception as exc: - self.client.fail("Error: service image lookup failed - %s" % to_native(exc)) - - out_redir_name, err_redir_name = make_redirection_tempfiles() - # build the image - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - new_image_id = service.build(pull=self.pull, no_cache=self.nocache) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error: build failed with %s") - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - - if new_image_id not in old_image_id: - # if a new image was built - result['changed'] = True - result['actions'].append(dict( - service=service.name, - built_image=dict( - name=service.image_name, - id=new_image_id - ) - )) - return result - - def _service_profile_enabled(self, service): - """Returns `True` if the service has no profiles defined or has a profile which is among - the profiles passed to the `docker compose up` command. Otherwise returns `False`. 
- """ - if LooseVersion(compose_version) < LooseVersion('1.28.0'): - return True - return service.enabled_for_profiles(self.profiles or []) - - def cmd_down(self): - result = dict( - changed=False, - actions=[] + + def stop( + self, + # Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + timeout: Optional[int] = None, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['stop'] + if timeout is not None: + subcommand.extend(['--timeout', '{:d}'.format(timeout)]) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - for service in self.project.services: - containers = service.containers(stopped=True) - if len(containers): - result['changed'] = True - result['actions'].append(dict( - service=service.name, - deleted=[container.name for container in containers] - )) - if not self.check_mode and result['changed']: - image_type = image_type_from_opt('--rmi', self.remove_images) - out_redir_name, err_redir_name = make_redirection_tempfiles() - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - self.project.down(image_type, self.remove_volumes, self.remove_orphans) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error stopping project - %s") - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - return result - - def cmd_stop(self, service_names): - result = dict( - changed=False, - actions=[] + + def restart( + self, + # Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + services: List[str] = [], + timeout: Optional[int] = None, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['restart'] + if timeout is not None: + subcommand.extend(['--timeout', '{:d}'.format(timeout)]) + for service in services: + subcommand.append(service) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - for service in self.project.services: - if not service_names or service.name in service_names: - service_res = dict( - service=service.name, - stop=[] - ) - for container in service.containers(stopped=False): - result['changed'] = True - service_res['stop'].append(dict( - id=container.id, - name=container.name, - short_id=container.short_id - )) - result['actions'].append(service_res) - if not self.check_mode and result['changed']: - out_redir_name, err_redir_name = make_redirection_tempfiles() - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - self.project.stop(service_names=service_names, timeout=self.timeout) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error stopping project %s") - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - return result - - def cmd_restart(self, service_names): - result = dict( - changed=False, - actions=[] + + def build( + self, + # 
Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + services: List[str] = [], + no_cache: bool = False, + pull: bool = False, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['build'] + if no_cache: + subcommand.append('--no-cache') + if pull: + subcommand.append('--pull') + for service in services: + subcommand.append(service) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - for service in self.project.services: - if not service_names or service.name in service_names: - service_res = dict( - service=service.name, - restart=[] - ) - for container in service.containers(stopped=True): - result['changed'] = True - service_res['restart'].append(dict( - id=container.id, - name=container.name, - short_id=container.short_id - )) - result['actions'].append(service_res) - - if not self.check_mode and result['changed']: - out_redir_name, err_redir_name = make_redirection_tempfiles() - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - self.project.restart(service_names=service_names, timeout=self.timeout) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error restarting project %s") - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - return result - - def cmd_scale(self): - result = dict( - changed=False, - actions=[] + def pull( + self, + # Common arguments + files: List[str], + /, + content: Optional[str] = None, + *, + project_name: Optional[str] = None, + project_directory: Optional[str] = None, + profiles: List[str] = [], + env_file: Optional[str], + # Specific arguments + services: List[str] = [], + include_deps: bool = False, + ) -> Tuple[int, str, str, List[ResourceEvent]]: + subcommand = ['pull'] + if include_deps: + subcommand.append('--include-deps') + for service in services: + subcommand.append(service) + return self._run_subcommand( + subcommand, + files, + content, + project_name=project_name, + project_directory=project_directory, + profiles=profiles, + env_file=env_file, ) - for service in self.project.services: - if service.name in self.scale: - service_res = dict( - service=service.name, - scale=0 - ) - containers = service.containers(stopped=True) - scale = self.parse_scale(service.name) - if len(containers) != scale: - result['changed'] = True - service_res['scale'] = scale - len(containers) - if not self.check_mode: - out_redir_name, err_redir_name = make_redirection_tempfiles() - try: - with stdout_redirector(out_redir_name): - with stderr_redirector(err_redir_name): - service.scale(scale) - except Exception as exc: - fail_reason = get_failure_info(exc, out_redir_name, err_redir_name, - msg_format="Error scaling {0} - %s".format(service.name)) - self.client.fail(**fail_reason) - else: - cleanup_redirection_tempfiles(out_redir_name, err_redir_name) - result['actions'].append(service_res) - return result - - def parse_scale(self, service_name): - try: - return int(self.scale[service_name]) - except ValueError: - self.client.fail("Error scaling %s - expected int, got %s", - service_name, to_native(type(self.scale[service_name]))) def main(): - argument_spec = dict( - 
project_src=dict(type='path'), - project_name=dict(type='str',), - env_file=dict(type='path'), - files=dict(type='list', elements='path'), - profiles=dict(type='list', elements='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), - definition=dict(type='dict'), - hostname_check=dict(type='bool', default=False), - recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']), - build=dict(type='bool', default=False), - remove_images=dict(type='str', choices=['all', 'local']), - remove_volumes=dict(type='bool', default=False), - remove_orphans=dict(type='bool', default=False), - stopped=dict(type='bool', default=False), - restarted=dict(type='bool', default=False), - scale=dict(type='dict'), - services=dict(type='list', elements='str'), - dependencies=dict(type='bool', default=True), - pull=dict(type='bool', default=False), - nocache=dict(type='bool', default=False), - debug=dict(type='bool', default=False), - timeout=dict(type='int') + module = AnsibleModule( + argument_spec=dict( + docker_host=DOCKER_COMMON_ARGS['docker_host'], + project_src=dict(type='path'), + project_name=dict(type='str',), + env_file=dict(type='path'), + files=dict(type='list', elements='path'), + profiles=dict(type='list', elements='str'), + state=dict(type='str', default='present', choices=['absent', 'present', 'built', 'pulled', 'restarted', 'stopped']), + definition=dict(type='dict'), + recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']), + build=dict(type='bool', default=False), + remove_images=dict(type='str', choices=['all', 'local']), + remove_volumes=dict(type='bool', default=False), + remove_orphans=dict(type='bool', default=False), + stopped=dict(type='bool', default=False), + restarted=dict(type='bool', default=False), + services=dict(type='list', elements='str'), + dependencies=dict(type='bool', default=True), + pull=dict(type='bool', default=False), + nocache=dict(type='bool', default=False), + timeout=dict(type='int') + ), + mutually_exclusive=[ + ('definition', 'files'), + ], + required_by={ + 'definition': ('project_src', ), + }, + required_one_of=[ + ('files', 'definition'), + ], ) - - mutually_exclusive = [ - ('definition', 'project_src'), - ('definition', 'files') - ] - - client = AnsibleDockerClient( - argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True, + if bool(module.params['stopped']) and bool(module.params['restarted']): + module.fail_json(changed=False, msg='Cannot use restarted and stopped at the same time.') + changed = False + compose = ComposeManager(module, module.params['docker_host']) + if module.params['definition'] is not None: + if not HAS_YAML: + raise Exception('Require YAML support') + common_args = [ + ['-'], + yaml_dump(module.params['definition']), + ] + else: + common_args = [ + module.params['files'], + ] + common_kwargs = dict( + project_name=module.params['project_name'], + project_directory=module.params['project_src'], + profiles=module.params['profiles'] or [], + env_file=module.params['env_file'], ) - - try: - result = ContainerManager(client).exec_module() - client.module.exit_json(**result) - except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) - except RequestException as e: - client.fail( - 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)), - exception=traceback.format_exc()) + 
if module.params['state'] == 'present':
+        rc, out, err, events = compose.up(
+            *common_args,
+            **common_kwargs,
+            services=module.params['services'] or [],
+            no_deps=not module.params['dependencies'],
+            pull='always' if module.params['pull'] else None,
+            build=module.params['build'],
+            force_recreate=module.params['recreate'] == "always",
+            no_recreate=module.params['recreate'] == "never",
+            remove_orphans=module.params['remove_orphans'],
+            timeout=module.params['timeout'],
+        )
+    elif module.params['state'] == 'stopped':
+        rc, out, err, events = compose.stop(
+            *common_args,
+            **common_kwargs,
+            timeout=module.params['timeout'],
+        )
+    elif module.params['state'] == 'restarted':
+        rc, out, err, events = compose.restart(
+            *common_args,
+            **common_kwargs,
+            services=module.params['services'] or [],
+            timeout=module.params['timeout'],
+        )
+    elif module.params['state'] == 'built':
+        rc, out, err, events = compose.build(
+            *common_args,
+            **common_kwargs,
+            services=module.params['services'] or [],
+            no_cache=module.params['nocache'],
+            pull=module.params['pull'],
+        )
+    elif module.params['state'] == 'pulled':
+        rc, out, err, events = compose.pull(
+            *common_args,
+            **common_kwargs,
+            services=module.params['services'] or [],
+            include_deps=module.params['dependencies'],
+        )
+        changed = True  # We cannot detect change from docker-compose stderr
+    elif module.params['state'] == 'absent':
+        rc, out, err, events = compose.down(
+            *common_args,
+            **common_kwargs,
+            remove_orphans=module.params['remove_orphans'],
+            rmi=module.params['remove_images'],
+            volumes=module.params['remove_volumes'],
+            timeout=module.params['timeout'],
+        )
+    else:
+        assert False  # DEAD CODE: state choices are validated by AnsibleModule
+    networks_states = defaultdict(list)
+    images_states = defaultdict(list)
+    volumes_states = defaultdict(list)
+    containers_states = defaultdict(list)
+    for event in events:
+        collection = {
+            ResourceType.NETWORK: networks_states,
+            ResourceType.IMAGE: images_states,
+            ResourceType.VOLUME: volumes_states,
+            ResourceType.CONTAINER: containers_states,
+        }[event.resource_type]
+        if event.status not in STATUS_DONE:
+            continue
+        if event.status in STATUS_THAT_CAUSE_A_CHANGE:
+            changed = True
+        collection[event.resource_id].append(STATUS_DOCKERCOMPOSE_TO_COMMUNITYDOCKER[event.status])
+    result = dict(
+        changed=changed,
+        networks={k: list(v) for k, v in networks_states.items()},
+        images={k: list(v) for k, v in images_states.items()},
+        volumes={k: list(v) for k, v in volumes_states.items()},
+        containers={k: list(v) for k, v in containers_states.items()},
+        stdout=out,
+        stderr=err,
+    )
+    if rc != 0:
+        result['msg'] = "docker-compose exited with code {:d}. Read stderr for more information.".format(rc)
+        module.fail_json(**result)
+    else:
+        module.exit_json(**result)


if __name__ == '__main__':
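For readers following the new CLI-driven flow above: `docker compose` reports per-resource progress on stderr, `_parse_stderr()` turns those lines into `ResourceEvent` entries, and `main()` folds the events into the `changed` flag and the per-resource status lists returned by the module. The self-contained sketch below illustrates that pipeline; the regex, the status sets, and the sample stderr lines are simplified assumptions standing in for the module's own `_re_resource_event`, `STATUS_DONE`, `STATUS_THAT_CAUSE_A_CHANGE`, and `STATUS_DOCKERCOMPOSE_TO_COMMUNITYDOCKER` definitions, which live earlier in the file and are not part of this hunk.

# Minimal sketch of the stderr -> events -> result aggregation shown above.
# The regex and status tables here are illustrative assumptions, not the
# module's actual definitions.
import re
from collections import defaultdict, namedtuple

ResourceEvent = namedtuple('ResourceEvent', ['resource_type', 'resource_id', 'status'])

# Hypothetical simplification of the module's _re_resource_event pattern.
_re_event = re.compile(
    r'^\s*(?P<resource_type>Network|Image|Volume|Container)\s+'
    r'(?P<resource_id>\S+)\s+(?P<status>\w+)\s*$'
)

# Hypothetical stand-ins for STATUS_DONE / STATUS_THAT_CAUSE_A_CHANGE:
# "Running" is a terminal status that does not imply a change.
STATUS_DONE = frozenset({'Created', 'Started', 'Stopped', 'Removed', 'Running'})
STATUS_THAT_CAUSE_A_CHANGE = frozenset({'Created', 'Started', 'Stopped', 'Removed'})


def parse_stderr(stderr):
    """Turn docker compose progress lines into ResourceEvent tuples."""
    events = []
    for line in stderr.splitlines():
        match = _re_event.match(line.rstrip())
        if match is not None:
            events.append(ResourceEvent(
                match.group('resource_type'),
                match.group('resource_id'),
                match.group('status'),
            ))
    return events


def aggregate(events):
    """Fold events into a changed flag and per-resource status lists."""
    changed = False
    states = {rt: defaultdict(list) for rt in ('Network', 'Image', 'Volume', 'Container')}
    for event in events:
        if event.status not in STATUS_DONE:
            continue
        if event.status in STATUS_THAT_CAUSE_A_CHANGE:
            changed = True
        states[event.resource_type][event.resource_id].append(event.status)
    return changed, {rt: dict(ids) for rt, ids in states.items()}


sample = " Network myproject_default  Created\n Container myproject-web-1  Started\n"
print(aggregate(parse_stderr(sample)))
# -> (True, {'Network': {'myproject_default': ['Created']}, ...,
#            'Container': {'myproject-web-1': ['Started']}})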