diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1ebd3492..c13aeaeb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,4 +38,4 @@ jobs: pip install -U pip pip install -U .[test] cp broker_settings.yaml.example broker_settings.yaml - pytest -v tests/ --ignore tests/cli_scenarios + pytest -v tests/ --ignore tests/functional diff --git a/.gitignore b/.gitignore index a3e247a0..6e9d040f 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,7 @@ __pycache__/ # Distribution / packaging .Python env/ -venv*/ +ven*/ build/ develop-eggs/ dist/ diff --git a/README.md b/README.md index dcf33656..abd4f95f 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The infrastrucure middleman # Description -Broker is a tool designed to provide a common interface between one or many services that provision virtual machines. It is an abstraction layer that allows you to ignore (most) of the implementation details and just get what you need. +Broker is a tool designed to provide a common interface between one or many services that provision virtual machines or containers. It is an abstraction layer that allows you to ignore most of the implementation details and just get what you need. # Installation ``` @@ -24,18 +24,18 @@ Broker can also be ran outside of its base directory. In order to do so, specify # Configuration The broker_settings.yaml file is used, through DynaConf, to set configuration values for broker's interaction with its 'providers'. -DynaConf integration provides support for setting environment variables to override any settings from the yaml file. +DynaConf integration provides support for setting environment variables to override any settings from broker's config file. -An environment variable override would take the form of: `DYNACONF_AnsibleTower__base_url="https://my.ansibletower.instance.com"`. Note the use of double underscores to model nested maps in yaml. +An environment variable override would take the form of: `BROKER_AnsibleTower__base_url="https://my.ansibletower.instance.com"`. Note the use of double underscores to model nested maps in yaml. -Broker allows for multiple instances of a provider to be in the configuration file. You can name an instance anything you want, then put instance-specfic settings nested under the instance name. One of your instances must have a setting `default: True`. +Broker allows for multiple instances of a provider to be in its config file. You can name an instance anything you want, then put instance-specfic settings nested under the instance name. One of your instances must have a setting `default: True`. For the AnsibleTower provider, authentication can be achieved either through setting a username and password, or through a token (Personal Access Token in Tower). -A username can still be provided when using a token to authenticate. This user will be used for inventory sync (examples below). This may be helpful for AnsibleTower administrators who would like to use their own token to authenticate, but want to set a different user in configuration for checking inventory. +A username can still be provided when using a token to authenticate. This user will be used for inventory sync (examples below). This might be helpful for AnsibleTower administrators who would like to use their own token to authenticate, but want to set a different user in configuration for checking inventory. 
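As an illustration of the multi-instance layout described above, here is a hedged sketch of how such a block might look in `broker_settings.yaml`. The instance names, URL, and keys are placeholders modeled on the `Container` instances example added to `broker_settings.yaml.example` in this change, not a verified AnsibleTower schema:
```
# illustrative only - instance names and values are placeholders
AnsibleTower:
  instances:
    - production:
        base_url: "https://my.ansibletower.instance.com"
        token: ""
        default: True
    - staging:
        base_url: "https://staging.ansibletower.instance.com"
        username: ""
        password: ""
```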
-# Usage -**Checking out a VM** +# CLI Usage +**Checking out a VM or container** ``` broker checkout --workflow "workflow-name" --workflow-arg1 something --workflow-arg2 else ``` @@ -56,6 +56,7 @@ You can also pass in a file for other arguments, where the contents will become ``` broker checkout --nick rhel7 --extra tests/data/args_file.yaml ``` +**Note:** Check with the provider to determine specific arguments. **Nicks** @@ -74,9 +75,9 @@ broker duplicate 1 3 broker duplicate 0 --count 2 ``` -**Listing your VMs** +**Listing your VMs and containers** -Broker maintains a local inventory of the VMs you've checked out. You can see these with the ```inventory``` command. +Broker maintains a local inventory of the VMs and containers you've checked out. You can see these with the ```inventory``` command. ``` broker inventory ``` @@ -88,13 +89,13 @@ To sync an inventory for a specific user, use the following syntax with `--sync` ``` broker inventory --sync AnsibleTower: ``` -To sync an inventory for a specific instance, use the follow syntax with --sync. +To sync an inventory for a specific instance, use the following syntax with --sync. ``` -broker inventory --sync AnsibleTower:: +broker inventory --sync Container:: ``` This can also be combined with the user syntax above. ``` -broker inventory --sync AnsibleTower::: +broker inventory --sync Container::: ``` @@ -108,9 +109,10 @@ broker extend vmname broker extend --all ``` -**Checking in VMs** +**Checking in VMs and containers** You can also return a VM to its provider with the ```checkin``` command. +Containers checked in this way will be fully deleted regardless of its status. You may use either the local id (```broker inventory```), the hostname, or "all" to checkin everything. ``` broker checkin my.host.fqdn.com @@ -132,6 +134,7 @@ broker providers AnsibleTower --workflow remove-vm **Run arbitrary actions** If a provider action doesn't result in a host creation/removal, Broker allows you to execute that action as well. There are a few output options available as well. +When executing with the Container provider, a new container will be spun up with your command (if specified), ran, and cleaned up. ``` broker execute --help broker execute --workflow my-awesome-workflow --additional-arg True @@ -182,3 +185,71 @@ You can also chain multiple filters together by separating them with a comma. Th `--filter 'name +class MyHost(Host): + ... + def setup(self): + self.register() + + def teardown(self): + self.unregister() +``` +**Note:** One important thing to keep in mind is that Broker will strip any non-pickleable attributes from Host objects when needed. If you encounter this, then it is best to construct your host classes in such a way that they can recover gracefully in these situations. 
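As a rough illustration of that recovery pattern, the sketch below keeps an unpicklable helper behind a lazy property so it can be rebuilt after Broker nulls it out. `SomeApiClient` is a made-up placeholder, not part of Broker or this change:
```
from broker.hosts import Host


class SomeApiClient:
    """Hypothetical stand-in for an attribute that cannot be pickled."""

    def __init__(self, hostname):
        self.hostname = hostname


class MyHost(Host):
    @property
    def api(self):
        # Broker sets unpicklable attributes to None before pickling a Host,
        # so rebuild the client on demand instead of caching it at construction
        if not getattr(self, "_api", None):
            self._api = SomeApiClient(self.hostname)
        return self._api
```
Because the client is always reached through the property, the host keeps working after it round-trips through Broker's multiprocessing checkout and checkin paths.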
diff --git a/broker/__init__.py b/broker/__init__.py index 5d5548ab..5449b8ad 100644 --- a/broker/__init__.py +++ b/broker/__init__.py @@ -1 +1,3 @@ -from broker.broker import VMBroker +from broker.broker import Broker + +VMBroker = Broker diff --git a/broker/binds/__init__.py b/broker/binds/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/broker/binds/containers.py b/broker/binds/containers.py new file mode 100644 index 00000000..f8ed527f --- /dev/null +++ b/broker/binds/containers.py @@ -0,0 +1,97 @@ +class ContainerBind: + def __init__(self, host=None, username=None, password=None, port=22): + self.host = host + self.username = username + self.password = password + self.port = port + self._client = None + self._ClientClass = None + + @property + def client(self): + if not isinstance(self._client, self._ClientClass): + self._client = self._ClientClass(base_url=self.uri) + return self._client + + @property + def images(self): + return self.client.images.list() + + @property + def containers(self): + return self.client.containers.list(all=True) + + def image_info(self, name): + if image := self.client.images.get(name): + return { + "id": image.short_id, + "tags": image.tags, + "size": image.attrs["Size"], + "config": { + k: v for k, v in image.attrs["Config"].items() if k != "Env" + }, + } + + def create_container(self, image, command=None, **kwargs): + """Create and return running container instance""" + return self.client.containers.create(image, command, **kwargs) + + def execute(self, image, command=None, remove=True, **kwargs): + """Run a container and return the raw result""" + return self.client.containers.run( + image, command=command, remove=remove, **kwargs + ).decode() + + def remove_container(self, container=None): + if container: + container.remove(v=True, force=True) + + def pull_image(self, name): + return self.client.images.pull(name) + + @staticmethod + def get_logs(container): + return "\n".join(map(lambda x: x.decode(), container.logs(stream=False))) + + @staticmethod + def get_attrs(cont): + return { + "id": cont.id, + "image": cont.attrs.get("ImageName", cont.attrs["Image"]), + "name": cont.name or cont.attrs["Names"][0], + "container_config": cont.attrs.get("Config", {}), + "host_config": cont.attrs.get("HostConfig", {}), + "ports": cont.ports or cont.attrs.get("Ports"), + } + + def __repr__(self): + inner = ", ".join( + f"{k}={v}" + for k, v in self.__dict__.items() + if not k.startswith("_") and not callable(v) + ) + return f"{self.__class__.__name__}({inner})" + + +class PodmanBind(ContainerBind): + def __init__(self, host=None, username=None, password=None, port=22): + super().__init__(host, username, password, port) + from podman import PodmanClient + + self._ClientClass = PodmanClient + if self.host == "localhost": + self.uri = "unix:///run/user/1000/podman/podman.sock" + else: + self.uri = f"http+ssh://{username}@{host}:{port}/run/podman/podman.sock" + + +class DockerBind(ContainerBind): + def __init__(self, host=None, username=None, password=None, port=22): + super().__init__(host, username, password, port) + from docker import DockerClient + + self._ClientClass = DockerClient + if self.host == "localhost": + self.uri = "unix://var/run/docker.sock" + else: + self.uri = f"tcp://{host}:{port}" diff --git a/broker/broker.py b/broker/broker.py index 92d1584c..9d5e8fdb 100644 --- a/broker/broker.py +++ b/broker/broker.py @@ -1,25 +1,32 @@ from logzero import logger from broker.providers.ansible_tower import AnsibleTower +from 
broker.providers.container import Container from broker.providers.test_provider import TestProvider from broker.hosts import Host from broker import exceptions, helpers from concurrent.futures import ProcessPoolExecutor, as_completed -PROVIDERS = {"AnsibleTower": AnsibleTower, "TestProvider": TestProvider} +PROVIDERS = { + "AnsibleTower": AnsibleTower, + "Container": Container, + "TestProvider": TestProvider, +} PROVIDER_ACTIONS = { # action: (InterfaceClass, "method_name") "workflow": (AnsibleTower, "execute"), "job_template": (AnsibleTower, "execute"), "template": (AnsibleTower, None), # needed for list-templates - "test_action": (TestProvider, "test_action"), "inventory": (AnsibleTower, None), + "container_host": (Container, "run_container"), + "container_app": (Container, "execute"), + "test_action": (TestProvider, "test_action"), } class mp_decorator: - """This decorator wraps VMBroker methods to enable multiprocessing + """This decorator wraps Broker methods to enable multiprocessing The decorated method is expected to return an itearable. """ @@ -70,7 +77,7 @@ def mp_split(*args, **kwargs): return mp_split -class VMBroker: +class Broker: # map exceptions for easier access when used as a library BrokerError = exceptions.BrokerError AuthenticationError = exceptions.AuthenticationError @@ -81,10 +88,10 @@ class VMBroker: def __init__(self, **kwargs): kwargs = helpers.resolve_file_args(kwargs) + logger.debug(f"Broker instantiated with {kwargs=}") self._hosts = kwargs.pop("hosts", []) self.host_classes = {"host": Host} # if a nick was specified, pull in the resolved arguments - logger.debug(f"Broker instantiated with {kwargs=}") if "nick" in kwargs: nick = kwargs.pop("nick") kwargs = helpers.merge_dicts(kwargs, helpers.resolve_nick(nick)) @@ -131,7 +138,6 @@ def _checkout(self): logger.info(f"Using provider {provider.__name__} to checkout") try: host = self._act(provider, method, checkout=True) - logger.debug(f"host={host}") except exceptions.ProviderError as err: host = err if host and not isinstance(host, exceptions.ProviderError): @@ -188,8 +194,10 @@ def _checkin(self, host): host.close() try: host.release() - except Exception: - pass + except Exception as err: + logger.warning(f"Encountered exception during checkin: {err}") + raise + # pass return host def checkin(self, sequential=False, host=None): @@ -299,7 +307,7 @@ def sync_inventory(provider): instance = {provider: instance} prov_inventory = PROVIDERS[provider](**instance).get_inventory(additional_arg) curr_inventory = [ - host["hostname"] or host["name"] + host.get("hostname", host.get("name")) for host in helpers.load_inventory() if host["_broker_provider"] == provider ] @@ -328,6 +336,14 @@ def from_inventory(self, filter=None): inv_hosts = helpers.load_inventory(filter=filter) return [self.reconstruct_host(inv_host) for inv_host in inv_hosts] + def __repr__(self): + inner = ", ".join( + f"{k}={v}" + for k, v in self.__dict__.items() + if not k.startswith("_") and not callable(v) + ) + return f"{self.__class__.__name__}({inner})" + def __enter__(self): try: hosts = self.checkout() diff --git a/broker/commands.py b/broker/commands.py index 8c88a0c9..47046b68 100644 --- a/broker/commands.py +++ b/broker/commands.py @@ -2,7 +2,7 @@ import sys import click from logzero import logger -from broker.broker import PROVIDERS, PROVIDER_ACTIONS, VMBroker +from broker.broker import PROVIDERS, PROVIDER_ACTIONS, Broker from broker.providers import Provider from broker import exceptions, helpers, settings @@ -53,7 +53,7 @@ def 
populate_providers(click_group): @click_group.command(name=prov, hidden=prov_class.hidden) def provider_cmd(*args, **kwargs): # the actual subcommand """Get information about a provider's actions""" - broker_inst = VMBroker(**kwargs) + broker_inst = Broker(**kwargs) broker_inst.nick_help() # iterate through available actions and populate options from them @@ -165,7 +165,7 @@ def checkout(ctx, background, nick, count, args_file, **kwargs): ) if background: helpers.fork_broker() - broker_inst = VMBroker(**broker_args) + broker_inst = Broker(**broker_args) broker_inst.checkout() @@ -206,9 +206,14 @@ def checkin(vm, background, all_, sequential, filter): inventory = helpers.load_inventory(filter=filter) to_remove = [] for num, host in enumerate(inventory): - if str(num) in vm or host["hostname"] in vm or host["name"] in vm or all_: - to_remove.append(VMBroker().reconstruct_host(host)) - broker_inst = VMBroker(hosts=to_remove) + if ( + str(num) in vm + or host.get("hostname") in vm + or host.get("name") in vm + or all_ + ): + to_remove.append(Broker().reconstruct_host(host)) + broker_inst = Broker(hosts=to_remove) broker_inst.checkin(sequential=sequential) @@ -227,7 +232,7 @@ def inventory(details, sync, filter): hostname pulled from list of dictionaries """ if sync: - VMBroker.sync_inventory(provider=sync) + Broker.sync_inventory(provider=sync) logger.info("Pulling local inventory") inventory = helpers.load_inventory(filter=filter) emit_data = [] @@ -235,10 +240,10 @@ def inventory(details, sync, filter): emit_data.append(host) if details: logger.info( - f"{num}: {host['hostname'] or host['name']}, Details: {helpers.yaml_format(host)}" + f"{num}: {host.get('hostname', host.get('name'))}, Details: {helpers.yaml_format(host)}" ) else: - logger.info(f"{num}: {host['hostname'] or host['name']}") + logger.info(f"{num}: {host.get('hostname', host.get('name'))}") helpers.emit({"inventory": emit_data}) @@ -273,8 +278,8 @@ def extend(vm, background, all_, sequential, filter, **kwargs): to_extend = [] for num, host in enumerate(inventory): if str(num) in vm or host["hostname"] in vm or host["name"] in vm or all_: - to_extend.append(VMBroker().reconstruct_host(host)) - broker_inst = VMBroker(hosts=to_extend, **broker_args) + to_extend.append(Broker().reconstruct_host(host)) + broker_inst = Broker(hosts=to_extend, **broker_args) broker_inst.extend(sequential=sequential) @@ -313,7 +318,7 @@ def duplicate(vm, background, count, all_, filter): if count: broker_args["_count"] = count logger.info(f"Duplicating: {host['hostname']}") - broker_inst = VMBroker(**broker_args) + broker_inst = Broker(**broker_args) broker_inst.checkout() else: logger.warning( @@ -376,7 +381,7 @@ def execute(ctx, background, nick, output_format, artifacts, args_file, **kwargs ) if background: helpers.fork_broker() - broker_inst = VMBroker(**broker_args) + broker_inst = Broker(**broker_args) result = broker_inst.execute() helpers.emit({"output": result}) if output_format == "raw": diff --git a/broker/helpers.py b/broker/helpers.py index d06f61d4..adcb56a4 100644 --- a/broker/helpers.py +++ b/broker/helpers.py @@ -322,11 +322,12 @@ def __getattr__(self, name): return self def __getitem__(self, key): - item = getattr(self, key, self) + if isinstance(key, str): + item = getattr(self, key, self) try: item = super().__getitem__(key) except KeyError: - pass + item = self return item def __call__(self, *args, **kwargs): @@ -446,3 +447,45 @@ def __enter__(self): def __exit__(self, *tb_info): self.return_file() + + +class Result: + """Dummy 
result class for presenting results in dot access""" + + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __repr__(self): + return getattr(self, "stdout") + + @classmethod + def from_ssh(cls, stdout, channel): + return cls( + stdout=stdout, + status=channel.get_exit_status(), + stderr=channel.read_stderr(), + ) + + @classmethod + def from_duplexed_exec(cls, duplex_exec): + if duplex_exec.output[0]: + stdout = duplex_exec.output[0].decode("utf-8") + else: + stdout = "" + if duplex_exec.output[1]: + stderr = duplex_exec.output[1].decode("utf-8") + else: + stderr = "" + return cls( + status=duplex_exec.exit_code, + stdout=stdout, + stderr=stderr, + ) + + @classmethod + def from_nonduplexed_exec(cls, nonduplex_exec): + return cls( + status=nonduplex_exec.exit_code, + stdout=nonduplex_exec.output.decode("utf-8"), + stderr="", + ) diff --git a/broker/hosts.py b/broker/hosts.py index 0b6d1568..aac47927 100644 --- a/broker/hosts.py +++ b/broker/hosts.py @@ -50,17 +50,23 @@ def _purify(self): pickle.dumps(obj) except (pickle.PicklingError, AttributeError): self.__dict__[key] = None + except RecursionError: + logger.warning(f"Recursion limit reached on {obj=}") def connect(self, username=None, password=None, timeout=None): username = username or self.username password = password or self.password timeout = timeout or self.timeout + _hostname, _port = self.hostname, 22 + if ":" in self.hostname: + _hostname, port = self.hostname.split(":") + _port = int(port) self.close() self._session = Session( - hostname=self.hostname, + hostname=_hostname, username=username, password=password, - timeout=timeout, + port=_port, ) def close(self): @@ -100,13 +106,21 @@ def to_dict(self): } def setup(self): - """Automatically ran when entering a VMBroker context manager""" + """Automatically ran when entering a Broker context manager""" pass def teardown(self): - """Automatically ran when exiting a VMBroker context manager""" + """Automatically ran when exiting a Broker context manager""" pass + def __repr__(self): + inner = ", ".join( + f"{k}={v}" + for k, v in self.__dict__.items() + if not k.startswith("_") and not callable(v) + ) + return f"{self.__class__.__name__}({inner})" + @classmethod def from_dict(cls, arg_dict): return cls(**arg_dict, from_dict=True) diff --git a/broker/providers/__init__.py b/broker/providers/__init__.py index 53526ca5..6e1f93f2 100644 --- a/broker/providers/__init__.py +++ b/broker/providers/__init__.py @@ -1,7 +1,9 @@ +import pickle import dynaconf from broker import exceptions from broker.settings import settings +from logzero import logger class Provider: @@ -95,3 +97,26 @@ def extend(self): def release(self, host_obj): raise exceptions.NotImplementedError("release has not been implemented") + + def __repr__(self): + inner = ", ".join( + f"{k}={v}" + for k, v in self.__dict__.items() + if not k.startswith("_") and not callable(v) + ) + return f"{self.__class__.__name__}({inner})" + + def __getstate__(self): + """If a session is active, remove it for pickle compatability""" + self._purify() + return self.__dict__ + + def _purify(self): + """Strip all unpickleable attributes from a Host before pickling""" + for key, obj in self.__dict__.items(): + try: + pickle.dumps(obj) + except (pickle.PicklingError, AttributeError): + self.__dict__[key] = None + except RecursionError: + logger.warning(f"Recursion limit reached on {obj=}") diff --git a/broker/providers/ansible_tower.py b/broker/providers/ansible_tower.py index ff93bb68..6441d969 100644 --- 
a/broker/providers/ansible_tower.py +++ b/broker/providers/ansible_tower.py @@ -575,6 +575,14 @@ def release(self, name, broker_args=None): **broker_args, ) + def __repr__(self): + inner = ", ".join( + f"{k}={v}" + for k, v in self.__dict__.items() + if not k.startswith("_") and not callable(v) + ) + return f"{self.__class__.__name__}({inner})" + def awxkit_representer(dumper, data): """In order to resolve awxkit objects, a custom representer is needed""" diff --git a/broker/providers/container.py b/broker/providers/container.py new file mode 100644 index 00000000..c800dbec --- /dev/null +++ b/broker/providers/container.py @@ -0,0 +1,278 @@ +import getpass +import inspect +from uuid import uuid4 +import click +from logzero import logger +from dynaconf import Validator +from broker import exceptions +from broker import helpers +from broker.settings import settings +from broker.providers import Provider +from broker.binds import containers + + +def container_execute(self, command, demux=True, **kwargs): + """This method is injected into the container host object on creation""" + kwargs["demux"] = demux + result = self._cont_inst.exec_run(command, **kwargs) + if demux: + return helpers.Result.from_duplexed_exec(result) + else: + return helpers.Result.from_nonduplexed_exec(result) + + +def container_info(container_inst): + return { + "_broker_provider": "Container", + "name": container_inst.name, + "hostname": container_inst.id[:12], + "image": container_inst.image.tags, + "ports": container_inst.ports, + "status": container_inst.status, + } + + +@property +def _cont_inst(self): + """Returns a live container object instance""" + if not getattr(self, "_cont_inst_p", None): + self._cont_inst_p = self._prov_inst._cont_inst_by_name(self.name) + return self._cont_inst_p + + +def _host_release(self): + caller_host = inspect.stack()[1][0].f_locals["host"] + caller_host._cont_inst.remove(v=True, force=True) + + +class Container(Provider): + _validators = [ + Validator("CONTAINER.runtime", default="podman"), + Validator("CONTAINER.host", default="localhost"), + Validator("CONTAINER.host_username", default="root"), + Validator( + "CONTAINER.host_password", + ), + Validator("CONTAINER.host_port", default=22), + Validator("CONTAINER.auto_map_ports", is_type_of=bool, default=True), + ] + _checkout_options = [ + click.option( + "--container-host", + type=str, + help="Name of a broker-compatible container host image", + ), + ] + _execute_options = [ + click.option( + "--container-app", + type=str, + help="Name of a container application image", + ), + ] + _extend_options = [] + + def __init__(self, **kwargs): + instance_name = kwargs.pop("Container", None) + self._validate_settings(instance_name) + if kwargs.get("bind") is not None: + self._runtime_cls = kwargs.pop("bind") + elif settings.container.runtime.lower() == "podman": + self._runtime_cls = containers.PodmanBind + elif settings.container.runtime.lower() == "docker": + self._runtime_cls = containers.DockerBind + else: + raise exceptions.ProviderError( + "Container", + f"Broker has no bind for {settings.container.runtime} containers", + ) + self._runtime = None # this will be used later + self._name_prefix = settings.container.get("name_prefix", getpass.getuser()) + + @property + def runtime(self): + """Making this a property helps to recover from pickle environments""" + if not self._runtime: + self._runtime = self._runtime_cls( + host=settings.container.host, + username=settings.container.host_username, + password=settings.container.host_password, + 
port=settings.container.host_port, + ) + return self._runtime + + def _ensure_image(self, name): + """Check if an image exists on the provider, attempt a pull if not""" + for image in self.runtime.images: + if name in image.tags: + return + elif ("localhost/" in name) and (name[10:] in image.tags): + return + try: + self.runtime.pull_image(name) + except Exception as err: + raise exceptions.ProviderError( + "Container", f"Unable to find image: {name}\n{err}" + ) + + @staticmethod + def _find_ssh_port(port_map): + """Go through container port map and find the mapping that corresponds to port 22""" + if isinstance(port_map, list): + # [{'hostPort': 1337, 'containerPort': 22, 'protocol': 'tcp', 'hostIP': ''}, + for pm in port_map: + if pm["containerPort"] == 22: + return pm["hostPort"] + elif isinstance(port_map, dict): + # {'22/tcp': [{'HostIp': '', 'HostPort': '1337'}], + for key, val in port_map.items(): + if key.startswith("22"): + return val[0]["HostPort"] + + def _set_attributes(self, host_inst, broker_args=None, cont_inst=None): + host_inst.__dict__.update( + { + "_prov_inst": self, + "_cont_inst_p": cont_inst, + "_broker_provider": "Container", + "_broker_args": broker_args, + } + ) + host_inst.__class__.release = _host_release + host_inst.__class__._cont_inst = _cont_inst + if not cont_inst.ports.get(22): + host_inst.__class__.execute = container_execute + + def _port_mapping(self, image, **kwargs): + """ + 22 + 22:1337 + 22/tcp + 22/tcp:1337 + 22,23 + 22:1337 23:1335 + """ + mapping = {} + if ports := kwargs.pop("ports", None): + if isinstance(ports, str): + for _map in ports.split(): + if ":" in _map: + p, h = _map.split(":") + else: + p, h = _map, None + if "/" in p: + p, s = p.split("/") + else: + p, s = p, "tcp" + mapping[f"{p}/{s}"] = int(h) if h else None + elif settings.container.auto_map_ports: + mapping = { + k: v or None + for k, v in self.runtime.image_info(image)["config"][ + "ExposedPorts" + ].items() + } + return mapping + + def _cont_inst_by_name(self, cont_name): + """Attempt to find and return a container by its name""" + for cont_inst in self.runtime.containers: + if cont_inst.name == cont_name: + return cont_inst + logger.error(f"Unable to find container by name {cont_name}") + + def construct_host(self, provider_params, host_classes, **kwargs): + """Constructs broker host from a container instance + + :param provider_params: a container instance object + + :param host_classes: host object + + :return: broker object of constructed host instance + """ + logger.debug( + f"constructing with {provider_params=}\n{host_classes=}\n{kwargs=}" + ) + if not provider_params: + host_inst = host_classes[kwargs.get("type", "host")](**kwargs) + cont_inst = self._cont_inst_by_name(host_inst.name) + self._set_attributes(host_inst, broker_args=kwargs, cont_inst=cont_inst) + return host_inst + cont_inst = provider_params + cont_attrs = self.runtime.get_attrs(cont_inst) + logger.debug(cont_attrs) + hostname = cont_inst.id[:12] + if port := self._find_ssh_port(cont_attrs["ports"]): + hostname = f"{hostname}:{port}" + if not hostname: + raise Exception(f"Could not determine container hostname:\n{cont_attrs}") + name = cont_attrs["name"] + logger.debug(f"hostname: {hostname}, name: {name}, host type: host") + host_inst = host_classes["host"]( + **{**kwargs, "hostname": hostname, "name": name} + ) + self._set_attributes(host_inst, broker_args=kwargs, cont_inst=cont_inst) + return host_inst + + def nick_help(self, **kwargs): + """Useful information about container images""" + 
results_limit = kwargs.get("results_limit", settings.CONTAINER.results_limit) + if image := kwargs.get("container_host"): + logger.info( + f"Information for {image} container-host:\n" + f"{helpers.yaml_format(self.runtime.image_info(image))}" + ) + elif kwargs.get("container_hosts"): + images = [ + img.tags[0] + for img in self.runtime.images + if img.labels.get("broker_compatible") + ] + if res_filter := kwargs.get("results_filter"): + images = helpers.results_filter(images, res_filter) + images = "\n".join(images[:results_limit]) + logger.info(f"Available images:\n{images}") + + def get_inventory(self, name_prefix): + """Get all containers that have a matching name prefix""" + name_prefix = name_prefix or self._name_prefix + return [ + container_info(cont) + for cont in self.runtime.containers + if cont.name.startswith(name_prefix) + ] + + def extend(self): + pass + + def release(self, host_obj): + host_obj._cont_inst.remove(force=True) + + def run_container(self, container_host, **kwargs): + """Start a container based on an image name (container_host)""" + self._ensure_image(container_host) + if not kwargs.get("name"): + kwargs["name"] = self._gen_name() + kwargs["ports"] = self._port_mapping(container_host, **kwargs) + container_inst = self.runtime.create_container(container_host, **kwargs) + container_inst.start() + return container_inst + + def execute(self, container_app, **kwargs): + """Run a container and return the raw results""" + return self.runtime.execute(container_app, **kwargs) + + def run_wait_container(self, image_name, **kwargs): + cont_inst = self.run_container(image_name, **kwargs) + cont_inst.wait(condition="exited") + return self.runtime.get_logs(cont_inst) + + def _gen_name(self): + return f"{self._name_prefix}_{str(uuid4()).split('-')[0]}" + + +# execute +# start container (with command?) 
+# container.wait(condition="exited") +# container.logs(stream=False) +# remove container diff --git a/broker/session.py b/broker/session.py index 78568ee5..b343c717 100644 --- a/broker/session.py +++ b/broker/session.py @@ -4,7 +4,7 @@ from logzero import logger from ssh2.session import Session as ssh2_Session from ssh2 import sftp as ssh2_sftp -from broker.helpers import simple_retry, translate_timeout +from broker.helpers import simple_retry, translate_timeout, Result SESSIONS = {} @@ -21,16 +21,6 @@ class AuthException(Exception): pass -class Result: - """Dummy result class for presenting results in dot access""" - - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - def __repr__(self): - return getattr(self, "stdout") - - class Session: def __init__(self, **kwargs): """Wrapper around ssh2-python's auth/connection system""" @@ -59,10 +49,9 @@ def _read(channel): except UnicodeDecodeError as err: logger.error(f"Skipping data chunk due to {err}\nReceived: {data}") size, data = channel.read() - return Result( + return Result.from_ssh( stdout=results, - status=channel.get_exit_status(), - stderr=channel.read_stderr(), + channel=channel, ) def run(self, command, timeout=0): diff --git a/broker_settings.yaml.example b/broker_settings.yaml.example index 1a94ba14..4b8e73f9 100644 --- a/broker_settings.yaml.example +++ b/broker_settings.yaml.example @@ -27,6 +27,23 @@ AnsibleTower: new_expire_time: "+172800" workflow_timeout: 3600 results_limit: 50 +Container: + instances: + - docker: + host_username: "" + host_password: "" + host_port: None + default: True + - remote: + host: "" + host_username: "" + host_password: "" + runtime: 'docker' + # name used to prefix container names, used to distinguish yourself + # if not set, then your local username will be used + # name_prefix: test + results_limit: 50 + auto_map_ports: False TestProvider: instances: - test1: diff --git a/setup.cfg b/setup.cfg index 4d6992aa..98dca478 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,6 +16,7 @@ classifiers = Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 [options] install_requires = @@ -31,13 +32,15 @@ zip_safe = False [options.extras_require] test = pytest -setup = +setup = setuptools setuptools-scm wheel twine +docker = docker +podman = podman-py [options.entry_points] -console_scripts = +console_scripts = broker = broker.commands:cli diff --git a/tests/data/cli_scenarios/containers/checkout_ch-d_ubi8.yaml b/tests/data/cli_scenarios/containers/checkout_ch-d_ubi8.yaml new file mode 100644 index 00000000..9505d38a --- /dev/null +++ b/tests/data/cli_scenarios/containers/checkout_ch-d_ubi8.yaml @@ -0,0 +1 @@ +container_host: ch-d:ubi8 diff --git a/tests/data/cli_scenarios/containers/execute_ch-d_ubi8.yaml b/tests/data/cli_scenarios/containers/execute_ch-d_ubi8.yaml new file mode 100644 index 00000000..53e9180c --- /dev/null +++ b/tests/data/cli_scenarios/containers/execute_ch-d_ubi8.yaml @@ -0,0 +1,2 @@ +container_app: ch-d:ubi8 +command: "ls -lah" diff --git a/tests/data/cli_scenarios/satlab/checkout_rhel78.yaml b/tests/data/cli_scenarios/satlab/checkout_rhel78.yaml index f78fef03..c75a4eb3 100644 --- a/tests/data/cli_scenarios/satlab/checkout_rhel78.yaml +++ b/tests/data/cli_scenarios/satlab/checkout_rhel78.yaml @@ -1,2 +1,2 @@ workflow: deploy-base-rhel -rhel_version: "7.9" +rhel_version: "7.8" diff --git a/tests/data/container/fake_containers.json b/tests/data/container/fake_containers.json new 
file mode 100644 index 00000000..56a097b7 --- /dev/null +++ b/tests/data/container/fake_containers.json @@ -0,0 +1,57 @@ +[ + { + "Id": "f37d3058317f006e91db56682487ecbd915dc063474c6644ae9d1e6ace4f0a89", + "Created": "2022-02-28T22:18:29.399266035Z", + "Path": "/bin/sh", + "Args": [ + "-c", + "/tmp/startup.sh" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false + }, + "Image": "sha256:960c4adb67cc3bc49b67391c5ac562db46d84b0432ad3edf964cf85f0ef63d2d", + "Name": "/jake_06c92962", + "Platform": "linux", + "HostConfig": { + "Binds": null, + "NetworkMode": "default", + "PortBindings": null + }, + "GraphDriver": { + "Data": null, + "Name": "btrfs" + }, + "Mounts": [], + "Config": { + "Hostname": "f37d3058317f", + "Domainname": "", + "User": "", + "ExposedPorts": { + "22/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "container=oci", + "HOME=/root" + ], + "Cmd": [ + "/bin/sh", + "-c", + "/tmp/startup.sh" + ], + "Image": "ch-d:ubi8", + "Volumes": null, + "WorkingDir": "/root", + "Entrypoint": null, + "Labels": { + "architecture": "x86_64", + "broker_compatible": "True", + "name": "ubi8", + "version": "8.5" + } + } + } +] diff --git a/tests/data/container/fake_images.json b/tests/data/container/fake_images.json new file mode 100644 index 00000000..e03f889f --- /dev/null +++ b/tests/data/container/fake_images.json @@ -0,0 +1,51 @@ +[ + { + "Id": "sha256:960c4adb67cc3bc49b67391c5ac562db46d84b0432ad3edf964cf85f0ef63d2d", + "RepoTags": [ + "ch-d:ubi8" + ], + "RepoDigests": [], + "Parent": "sha256:fea6edb8ca1a4cbdec9bef9e822d38620b93624b24a09c9d1af240fcfd569ccb", + "Author": "https://github.com/JacobCallahan", + "Config": { + "Hostname": "f27f3e2c9f57", + "ExposedPorts": { + "22/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "container=oci", + "HOME=/root" + ], + "Cmd": [ + "/bin/sh", + "-c", + "/tmp/startup.sh" + ], + "Image": "sha256:fea6edb8ca1a4cbdec9bef9e822d38620b93624b24a09c9d1af240fcfd569ccb", + "Volumes": null, + "WorkingDir": "/root", + "Entrypoint": null, + "Labels": { + "architecture": "x86_64", + "broker_compatible": "True", + "name": "ubi8", + "version": "8.5" + } + }, + "Architecture": "amd64", + "Os": "linux", + "Size": 237849999, + "VirtualSize": 237849999, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:a9820c2af00a34f160836f6ef2044d88e6019ca19b3c15ec22f34afe9d73f41c", + "sha256:3d5ecee9360ea8711f32d2af0cab1eae4d53140496f961ca1a634b5e2e817412" + ] + }, + "Metadata": { + "LastTagTime": "2022-02-18T16:22:59.134232153-05:00" + } + } +] diff --git a/tests/functional/test_containers.py b/tests/functional/test_containers.py new file mode 100644 index 00000000..1e7facb7 --- /dev/null +++ b/tests/functional/test_containers.py @@ -0,0 +1,73 @@ +from pathlib import Path +import pytest +from click.testing import CliRunner +from broker import Broker +from broker.commands import cli +from broker.providers.container import Container +from broker.settings import inventory_path + +SCENARIO_DIR = Path("tests/data/cli_scenarios/containers") + + +@pytest.fixture(scope="module", autouse=True) +def skip_if_not_configured(): + try: + Container() + except Exception as err: + pytest.skip(f"Container is not configured correctly: {err}") + + +@pytest.fixture(scope="module") +def temp_inventory(): + """Temporarily move the local inventory, then move it back when done""" + backup_path = inventory_path.rename(f"{inventory_path.absolute()}.bak") + yield + CliRunner().invoke( + 
cli, ["checkin", "--all", "--filter", "_broker_provider