diff --git a/Jenkinsfile b/Jenkinsfile index 211159bc28..8724c10fb3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -45,10 +45,13 @@ def getDockerVersions = { -> } def getAPIVersion = { engineVersion -> - def versionMap = ['17.06': '1.30', '17.12': '1.35', '18.02': '1.36', '18.03': '1.37'] + def versionMap = [ + '17.06': '1.30', '17.12': '1.35', '18.02': '1.36', '18.03': '1.37', + '18.06': '1.38', '18.09': '1.39' + ] def result = versionMap[engineVersion.substring(0, 5)] if (!result) { - return '1.37' + return '1.39' } return result } @@ -88,7 +91,7 @@ def runTests = { Map settings -> --network ${testNetwork} \\ --volumes-from ${dindContainerName} \\ ${testImage} \\ - py.test -v -rxs tests/integration + py.test -v -rxs --cov=docker tests/ """ } finally { sh """ diff --git a/docker/api/build.py b/docker/api/build.py index 3a67ff8b28..53c94b0dcf 100644 --- a/docker/api/build.py +++ b/docker/api/build.py @@ -19,7 +19,8 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None, forcerm=False, dockerfile=None, container_limits=None, decode=False, buildargs=None, gzip=False, shmsize=None, labels=None, cache_from=None, target=None, network_mode=None, - squash=None, extra_hosts=None, platform=None, isolation=None): + squash=None, extra_hosts=None, platform=None, isolation=None, + use_config_proxy=False): """ Similar to the ``docker build`` command. Either ``path`` or ``fileobj`` needs to be set. ``path`` can be a local path (to a directory @@ -103,6 +104,10 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None, platform (str): Platform in the format ``os[/arch[/variant]]`` isolation (str): Isolation technology used during build. Default: `None`. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being built. Returns: A generator for the build output. @@ -168,6 +173,10 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None, } params.update(container_limits) + if use_config_proxy: + proxy_args = self._proxy_configs.get_environment() + for k, v in proxy_args.items(): + buildargs.setdefault(k, v) if buildargs: params.update({'buildargs': json.dumps(buildargs)}) @@ -286,38 +295,20 @@ def _set_auth_headers(self, headers): # If we don't have any auth data so far, try reloading the config # file one more time in case anything showed up in there. - if not self._auth_configs: + if not self._auth_configs or self._auth_configs.is_empty: log.debug("No auth config in memory - loading from filesystem") - self._auth_configs = auth.load_config() + self._auth_configs = auth.load_config( + credstore_env=self.credstore_env + ) # Send the full auth configuration (if any exists), since the build # could use any (or all) of the registries. 
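To make the `use_config_proxy` build-args merge above concrete, here is a minimal sketch of the intended semantics, assuming the `ProxyConfig` helper introduced later in this diff under `docker/utils/proxy.py`; the proxy values are illustrative:

```python
from docker.utils.proxy import ProxyConfig

# Illustrative client-side proxy settings, as they would be parsed from the
# "proxies" section of ~/.docker/config.json.
proxy_config = ProxyConfig.from_dict({
    'httpProxy': 'http://proxy:3128',
    'noProxy': 'localhost,127.0.0.1',
})

# User-supplied build args win: setdefault() only fills in the proxy
# variables the caller did not set explicitly.
buildargs = {'HTTP_PROXY': 'http://user-override:8080'}
for k, v in proxy_config.get_environment().items():
    buildargs.setdefault(k, v)

# buildargs keeps the override and gains http_proxy, NO_PROXY and no_proxy.
print(sorted(buildargs.items()))
```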
 if self._auth_configs:
-            auth_cfgs = self._auth_configs
-            auth_data = {}
-            if auth_cfgs.get('credsStore'):
-                # Using a credentials store, we need to retrieve the
-                # credentials for each registry listed in the config.json file
-                # Matches CLI behavior: https://github.com/docker/docker/blob/
-                # 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
-                # credentials/native_store.go#L68-L83
-                for registry in auth_cfgs.get('auths', {}).keys():
-                    auth_data[registry] = auth.resolve_authconfig(
-                        auth_cfgs, registry,
-                        credstore_env=self.credstore_env,
-                    )
-            else:
-                for registry in auth_cfgs.get('credHelpers', {}).keys():
-                    auth_data[registry] = auth.resolve_authconfig(
-                        auth_cfgs, registry,
-                        credstore_env=self.credstore_env
-                    )
-                for registry, creds in auth_cfgs.get('auths', {}).items():
-                    if registry not in auth_data:
-                        auth_data[registry] = creds
-            # See https://github.com/docker/docker-py/issues/1683
-            if auth.INDEX_NAME in auth_data:
-                auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
+            auth_data = self._auth_configs.get_all_credentials()
+
+            # See https://github.com/docker/docker-py/issues/1683
+            if auth.INDEX_URL not in auth_data and auth.INDEX_NAME in auth_data:
+                auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
 
             log.debug(
                 'Sending auth config ({0})'.format(
@@ -325,9 +316,10 @@ def _set_auth_headers(self, headers):
                 )
             )
 
-            headers['X-Registry-Config'] = auth.encode_header(
-                auth_data
-            )
+            if auth_data:
+                headers['X-Registry-Config'] = auth.encode_header(
+                    auth_data
+                )
         else:
             log.debug('No auth config found')
 
diff --git a/docker/api/client.py b/docker/api/client.py
index 197846d105..668dfeef86 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -32,8 +32,9 @@
 from ..tls import TLSConfig
 from ..transport import SSLAdapter, UnixAdapter
 from ..utils import utils, check_resource, update_headers, config
-from ..utils.socket import frames_iter, socket_raw_iter
+from ..utils.socket import frames_iter, consume_socket_output, demux_adaptor
 from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
 try:
     from ..transport import NpipeAdapter
 except ImportError:
@@ -114,8 +115,17 @@ def __init__(self, base_url=None, version=None,
         self.headers['User-Agent'] = user_agent
 
         self._general_configs = config.load_general_config()
+
+        proxy_config = self._general_configs.get('proxies', {})
+        try:
+            proxies = proxy_config[base_url]
+        except KeyError:
+            proxies = proxy_config.get('default', {})
+
+        self._proxy_configs = ProxyConfig.from_dict(proxies)
+
         self._auth_configs = auth.load_config(
-            config_dict=self._general_configs
+            config_dict=self._general_configs, credstore_env=credstore_env,
         )
         self.credstore_env = credstore_env
 
@@ -381,19 +391,23 @@ def _stream_raw_result(self, response, chunk_size=1, decode=True):
             for out in response.iter_content(chunk_size, decode):
                 yield out
 
-    def _read_from_socket(self, response, stream, tty=False):
+    def _read_from_socket(self, response, stream, tty=True, demux=False):
        socket = self._get_raw_response_socket(response)
 
-        gen = None
-        if tty is False:
-            gen = frames_iter(socket)
+        gen = frames_iter(socket, tty)
+
+        if demux:
+            # The generator will output tuples (stdout, stderr)
+            gen = (demux_adaptor(*frame) for frame in gen)
         else:
-            gen = socket_raw_iter(socket)
+            # The generator will output strings
+            gen = (data for (_, data) in gen)
 
         if stream:
             return gen
         else:
-            return six.binary_type().join(gen)
+            # Wait for all the frames, concatenate them, and return the result
+            return consume_socket_output(gen, demux=demux)
 
     def
_disable_socket_timeout(self, socket): """ Depending on the combination of python version and whether we're @@ -476,4 +490,6 @@ def reload_config(self, dockercfg_path=None): Returns: None """ - self._auth_configs = auth.load_config(dockercfg_path) + self._auth_configs = auth.load_config( + dockercfg_path, credstore_env=self.credstore_env + ) diff --git a/docker/api/config.py b/docker/api/config.py index 767bef263a..93e5168f64 100644 --- a/docker/api/config.py +++ b/docker/api/config.py @@ -42,7 +42,7 @@ def inspect_config(self, id): Retrieve config metadata Args: - id (string): Full ID of the config to remove + id (string): Full ID of the config to inspect Returns (dict): A dictionary of metadata diff --git a/docker/api/container.py b/docker/api/container.py index fce73af640..43ae5320ff 100644 --- a/docker/api/container.py +++ b/docker/api/container.py @@ -13,7 +13,7 @@ class ContainerApiMixin(object): @utils.check_resource('container') def attach(self, container, stdout=True, stderr=True, - stream=False, logs=False): + stream=False, logs=False, demux=False): """ Attach to a container. @@ -28,11 +28,15 @@ def attach(self, container, stdout=True, stderr=True, stream (bool): Return container output progressively as an iterator of strings, rather than a single string. logs (bool): Include the container's previous output. + demux (bool): Keep stdout and stderr separate. Returns: - By default, the container's output as a single string. + By default, the container's output as a single string (two if + ``demux=True``: one for stdout and one for stderr). - If ``stream=True``, an iterator of output strings. + If ``stream=True``, an iterator of output strings. If + ``demux=True``, two iterators are returned: one for stdout and one + for stderr. Raises: :py:class:`docker.errors.APIError` @@ -54,8 +58,7 @@ def attach(self, container, stdout=True, stderr=True, response = self._post(u, headers=headers, params=params, stream=True) output = self._read_from_socket( - response, stream, self._check_is_tty(container) - ) + response, stream, self._check_is_tty(container), demux=demux) if stream: return CancellableStream(output, response) @@ -218,7 +221,8 @@ def create_container(self, image, command=None, hostname=None, user=None, working_dir=None, domainname=None, host_config=None, mac_address=None, labels=None, stop_signal=None, networking_config=None, healthcheck=None, - stop_timeout=None, runtime=None): + stop_timeout=None, runtime=None, + use_config_proxy=False): """ Creates a container. Parameters are similar to those for the ``docker run`` command except it doesn't support the attach options (``-a``). @@ -387,6 +391,10 @@ def create_container(self, image, command=None, hostname=None, user=None, runtime (str): Runtime to use with this container. healthcheck (dict): Specify a test to perform to check that the container is healthy. + use_config_proxy (bool): If ``True``, and if the docker client + configuration file (``~/.docker/config.json`` by default) + contains a proxy configuration, the corresponding environment + variables will be set in the container being created. Returns: A dictionary with an image 'Id' key and a 'Warnings' key. 
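A short usage sketch of the `use_config_proxy` parameter documented above; the daemon URL, image and environment values are illustrative, not taken from this diff:

```python
import docker

# Assumes ~/.docker/config.json contains a "proxies" section.
client = docker.APIClient(base_url='unix://var/run/docker.sock')

container = client.create_container(
    'busybox', 'env',
    # Dict environments are converted to the ["KEY=value", ...] form; with
    # use_config_proxy=True the proxy variables are prepended, so values
    # passed explicitly here still take precedence.
    environment={'APP_ENV': 'dev'},
    use_config_proxy=True,
)
client.start(container)
print(client.logs(container).decode())
```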
@@ -400,6 +408,14 @@ def create_container(self, image, command=None, hostname=None, user=None,
         if isinstance(volumes, six.string_types):
             volumes = [volumes, ]
 
+        if isinstance(environment, dict):
+            environment = utils.utils.format_environment(environment)
+
+        if use_config_proxy:
+            environment = self._proxy_configs.inject_proxy_environment(
+                environment
+            )
+
         config = self.create_container_config(
             image, command, hostname, user, detach, stdin_open, tty,
             ports, environment, volumes,
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index 76a94cf034..f715a131ad 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -42,8 +42,8 @@ def events(self, since=None, until=None, filters=None, decode=None):
 
         Example:
 
-            >>> for event in client.events()
-            ...   print event
+            >>> for event in client.events(decode=True):
+            ...   print(event)
            {u'from': u'image/with:tag',
             u'id': u'container-id',
             u'status': u'start',
@@ -54,7 +54,7 @@ def events(self, since=None, until=None, filters=None, decode=None):
 
             >>> events = client.events()
             >>> for event in events:
-            ...   print event
+            ...   print(event)
             >>> # and cancel from another thread
             >>> events.close()
         """
@@ -124,13 +124,15 @@ def login(self, username, password=None, email=None, registry=None,
         # If dockercfg_path is passed check to see if the config file exists,
         # if so load that config.
         if dockercfg_path and os.path.exists(dockercfg_path):
-            self._auth_configs = auth.load_config(dockercfg_path)
-        elif not self._auth_configs:
-            self._auth_configs = auth.load_config()
-
-        authcfg = auth.resolve_authconfig(
-            self._auth_configs, registry, credstore_env=self.credstore_env,
-        )
+            self._auth_configs = auth.load_config(
+                dockercfg_path, credstore_env=self.credstore_env
+            )
+        elif not self._auth_configs or self._auth_configs.is_empty:
+            self._auth_configs = auth.load_config(
+                credstore_env=self.credstore_env
+            )
+
+        authcfg = self._auth_configs.resolve_authconfig(registry)
         # If we found an existing auth config for this registry and username
         # combination, we can return it immediately unless reauth is requested.
         if authcfg and authcfg.get('username', None) == username \
@@ -146,9 +148,7 @@ def login(self, username, password=None, email=None, registry=None,
 
         response = self._post_json(self._url('/auth'), data=req_data)
         if response.status_code == 200:
-            if 'auths' not in self._auth_configs:
-                self._auth_configs['auths'] = {}
-            self._auth_configs['auths'][registry or auth.INDEX_NAME] = req_data
+            self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
         return self._result(response, json=True)
 
     def ping(self):
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index 986d87f21c..d13b128998 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -118,7 +118,7 @@ def exec_resize(self, exec_id, height=None, width=None):
 
     @utils.check_resource('exec_id')
     def exec_start(self, exec_id, detach=False, tty=False, stream=False,
-                   socket=False):
+                   socket=False, demux=False):
         """
         Start a previously set up exec instance.
 
@@ -130,11 +130,14 @@ def exec_start(self, exec_id, detach=False, tty=False, stream=False,
             stream (bool): Stream response data. Default: False
             socket (bool): Return the connection socket to allow custom
                 read/write operations.
+            demux (bool): Return stdout and stderr separately
 
         Returns:
-            (generator or str): If ``stream=True``, a generator yielding
-            response chunks. If ``socket=True``, a socket object for the
-            connection. A string containing response data otherwise.
+ + (generator or str or tuple): If ``stream=True``, a generator + yielding response chunks. If ``socket=True``, a socket object for + the connection. A string containing response data otherwise. If + ``demux=True``, stdout and stderr are separated. Raises: :py:class:`docker.errors.APIError` @@ -162,4 +165,4 @@ def exec_start(self, exec_id, detach=False, tty=False, stream=False, return self._result(res) if socket: return self._get_raw_response_socket(res) - return self._read_from_socket(res, stream, tty) + return self._read_from_socket(res, stream, tty=tty, demux=demux) diff --git a/docker/api/image.py b/docker/api/image.py index 5a6537e797..d3fed5c0cc 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -353,8 +353,8 @@ def pull(self, repository, tag=None, stream=False, auth_config=None, Example: - >>> for line in cli.pull('busybox', stream=True): - ... print(json.dumps(json.loads(line), indent=4)) + >>> for line in cli.pull('busybox', stream=True, decode=True): + ... print(json.dumps(line, indent=4)) { "status": "Pulling image (latest) from busybox", "progressDetail": {}, @@ -429,12 +429,12 @@ def push(self, repository, tag=None, stream=False, auth_config=None, If the server returns an error. Example: - >>> for line in cli.push('yourname/app', stream=True): - ... print line - {"status":"Pushing repository yourname/app (1 tags)"} - {"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"} - {"status":"Image already pushed, skipping","progressDetail":{}, - "id":"511136ea3c5a"} + >>> for line in cli.push('yourname/app', stream=True, decode=True): + ... print(line) + {'status': 'Pushing repository yourname/app (1 tags)'} + {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} + {'status': 'Image already pushed, skipping', 'progressDetail':{}, + 'id': '511136ea3c5a'} ... """ diff --git a/docker/auth.py b/docker/auth.py index 17158f4ae3..638ab9b0a9 100644 --- a/docker/auth.py +++ b/docker/auth.py @@ -39,11 +39,11 @@ def resolve_index_name(index_name): def get_config_header(client, registry): log.debug('Looking for auth config') - if not client._auth_configs: + if not client._auth_configs or client._auth_configs.is_empty: log.debug( "No auth config in memory - loading from filesystem" ) - client._auth_configs = load_config() + client._auth_configs = load_config(credstore_env=client.credstore_env) authcfg = resolve_authconfig( client._auth_configs, registry, credstore_env=client.credstore_env ) @@ -70,81 +70,256 @@ def split_repo_name(repo_name): def get_credential_store(authconfig, registry): - if not registry or registry == INDEX_NAME: - registry = 'https://index.docker.io/v1/' + if not isinstance(authconfig, AuthConfig): + authconfig = AuthConfig(authconfig) + return authconfig.get_credential_store(registry) + + +class AuthConfig(dict): + def __init__(self, dct, credstore_env=None): + if 'auths' not in dct: + dct['auths'] = {} + self.update(dct) + self._credstore_env = credstore_env + self._stores = {} + + @classmethod + def parse_auth(cls, entries, raise_on_error=False): + """ + Parses authentication entries + + Args: + entries: Dict of authentication entries. + raise_on_error: If set to true, an invalid format will raise + InvalidConfigFile + + Returns: + Authentication registry. 
+ """ + + conf = {} + for registry, entry in six.iteritems(entries): + if not isinstance(entry, dict): + log.debug( + 'Config entry for key {0} is not auth config'.format( + registry + ) + ) + # We sometimes fall back to parsing the whole config as if it + # was the auth config by itself, for legacy purposes. In that + # case, we fail silently and return an empty conf if any of the + # keys is not formatted properly. + if raise_on_error: + raise errors.InvalidConfigFile( + 'Invalid configuration for registry {0}'.format( + registry + ) + ) + return {} + if 'identitytoken' in entry: + log.debug( + 'Found an IdentityToken entry for registry {0}'.format( + registry + ) + ) + conf[registry] = { + 'IdentityToken': entry['identitytoken'] + } + continue # Other values are irrelevant if we have a token + + if 'auth' not in entry: + # Starting with engine v1.11 (API 1.23), an empty dictionary is + # a valid value in the auths config. + # https://github.com/docker/compose/issues/3265 + log.debug( + 'Auth data for {0} is absent. Client might be using a ' + 'credentials store instead.'.format(registry) + ) + conf[registry] = {} + continue - return authconfig.get('credHelpers', {}).get(registry) or authconfig.get( - 'credsStore' - ) + username, password = decode_auth(entry['auth']) + log.debug( + 'Found entry (registry={0}, username={1})' + .format(repr(registry), repr(username)) + ) + conf[registry] = { + 'username': username, + 'password': password, + 'email': entry.get('email'), + 'serveraddress': registry, + } + return conf + + @classmethod + def load_config(cls, config_path, config_dict, credstore_env=None): + """ + Loads authentication data from a Docker configuration file in the given + root directory or if config_path is passed use given path. + Lookup priority: + explicit config_path parameter > DOCKER_CONFIG environment + variable > ~/.docker/config.json > ~/.dockercfg + """ + + if not config_dict: + config_file = config.find_config_file(config_path) + + if not config_file: + return cls({}, credstore_env) + try: + with open(config_file) as f: + config_dict = json.load(f) + except (IOError, KeyError, ValueError) as e: + # Likely missing new Docker config file or it's in an + # unknown format, continue to attempt to read old location + # and format. + log.debug(e) + return cls(_load_legacy_config(config_file), credstore_env) + + res = {} + if config_dict.get('auths'): + log.debug("Found 'auths' section") + res.update({ + 'auths': cls.parse_auth( + config_dict.pop('auths'), raise_on_error=True + ) + }) + if config_dict.get('credsStore'): + log.debug("Found 'credsStore' section") + res.update({'credsStore': config_dict.pop('credsStore')}) + if config_dict.get('credHelpers'): + log.debug("Found 'credHelpers' section") + res.update({'credHelpers': config_dict.pop('credHelpers')}) + if res: + return cls(res, credstore_env) -def resolve_authconfig(authconfig, registry=None, credstore_env=None): - """ - Returns the authentication data from the given auth configuration for a - specific registry. As with the Docker client, legacy entries in the config - with full URLs are stripped down to hostnames before checking for a match. - Returns None if no match was found. 
- """ + log.debug( + "Couldn't find auth-related section ; attempting to interpret " + "as auth-only file" + ) + return cls({'auths': cls.parse_auth(config_dict)}, credstore_env) - if 'credHelpers' in authconfig or 'credsStore' in authconfig: - store_name = get_credential_store(authconfig, registry) - if store_name is not None: - log.debug( - 'Using credentials store "{0}"'.format(store_name) - ) - cfg = _resolve_authconfig_credstore( - authconfig, registry, store_name, env=credstore_env - ) - if cfg is not None: - return cfg - log.debug('No entry in credstore - fetching from auth dict') + @property + def auths(self): + return self.get('auths', {}) - # Default to the public index server - registry = resolve_index_name(registry) if registry else INDEX_NAME - log.debug("Looking for auth entry for {0}".format(repr(registry))) + @property + def creds_store(self): + return self.get('credsStore', None) - authdict = authconfig.get('auths', {}) - if registry in authdict: - log.debug("Found {0}".format(repr(registry))) - return authdict[registry] + @property + def cred_helpers(self): + return self.get('credHelpers', {}) - for key, conf in six.iteritems(authdict): - if resolve_index_name(key) == registry: - log.debug("Found {0}".format(repr(key))) - return conf + @property + def is_empty(self): + return ( + not self.auths and not self.creds_store and not self.cred_helpers + ) - log.debug("No entry found") - return None + def resolve_authconfig(self, registry=None): + """ + Returns the authentication data from the given auth configuration for a + specific registry. As with the Docker client, legacy entries in the + config with full URLs are stripped down to hostnames before checking + for a match. Returns None if no match was found. + """ + + if self.creds_store or self.cred_helpers: + store_name = self.get_credential_store(registry) + if store_name is not None: + log.debug( + 'Using credentials store "{0}"'.format(store_name) + ) + cfg = self._resolve_authconfig_credstore(registry, store_name) + if cfg is not None: + return cfg + log.debug('No entry in credstore - fetching from auth dict') + # Default to the public index server + registry = resolve_index_name(registry) if registry else INDEX_NAME + log.debug("Looking for auth entry for {0}".format(repr(registry))) -def _resolve_authconfig_credstore(authconfig, registry, credstore_name, - env=None): - if not registry or registry == INDEX_NAME: - # The ecosystem is a little schizophrenic with index.docker.io VS - # docker.io - in that case, it seems the full URL is necessary. 
- registry = INDEX_URL - log.debug("Looking for auth entry for {0}".format(repr(registry))) - store = dockerpycreds.Store(credstore_name, environment=env) - try: - data = store.get(registry) - res = { - 'ServerAddress': registry, - } - if data['Username'] == TOKEN_USERNAME: - res['IdentityToken'] = data['Secret'] - else: - res.update({ - 'Username': data['Username'], - 'Password': data['Secret'], - }) - return res - except dockerpycreds.CredentialsNotFound as e: - log.debug('No entry found') + if registry in self.auths: + log.debug("Found {0}".format(repr(registry))) + return self.auths[registry] + + for key, conf in six.iteritems(self.auths): + if resolve_index_name(key) == registry: + log.debug("Found {0}".format(repr(key))) + return conf + + log.debug("No entry found") return None - except dockerpycreds.StoreError as e: - raise errors.DockerException( - 'Credentials store error: {0}'.format(repr(e)) - ) + + def _resolve_authconfig_credstore(self, registry, credstore_name): + if not registry or registry == INDEX_NAME: + # The ecosystem is a little schizophrenic with index.docker.io VS + # docker.io - in that case, it seems the full URL is necessary. + registry = INDEX_URL + log.debug("Looking for auth entry for {0}".format(repr(registry))) + store = self._get_store_instance(credstore_name) + try: + data = store.get(registry) + res = { + 'ServerAddress': registry, + } + if data['Username'] == TOKEN_USERNAME: + res['IdentityToken'] = data['Secret'] + else: + res.update({ + 'Username': data['Username'], + 'Password': data['Secret'], + }) + return res + except dockerpycreds.CredentialsNotFound: + log.debug('No entry found') + return None + except dockerpycreds.StoreError as e: + raise errors.DockerException( + 'Credentials store error: {0}'.format(repr(e)) + ) + + def _get_store_instance(self, name): + if name not in self._stores: + self._stores[name] = dockerpycreds.Store( + name, environment=self._credstore_env + ) + return self._stores[name] + + def get_credential_store(self, registry): + if not registry or registry == INDEX_NAME: + registry = INDEX_URL + + return self.cred_helpers.get(registry) or self.creds_store + + def get_all_credentials(self): + auth_data = self.auths.copy() + if self.creds_store: + # Retrieve all credentials from the default store + store = self._get_store_instance(self.creds_store) + for k in store.list().keys(): + auth_data[k] = self._resolve_authconfig_credstore( + k, self.creds_store + ) + + # credHelpers entries take priority over all others + for reg, store_name in self.cred_helpers.items(): + auth_data[reg] = self._resolve_authconfig_credstore( + reg, store_name + ) + + return auth_data + + def add_auth(self, reg, data): + self['auths'][reg] = data + + +def resolve_authconfig(authconfig, registry=None, credstore_env=None): + if not isinstance(authconfig, AuthConfig): + authconfig = AuthConfig(authconfig, credstore_env) + return authconfig.resolve_authconfig(registry) def convert_to_hostname(url): @@ -177,100 +352,11 @@ def parse_auth(entries, raise_on_error=False): Authentication registry. """ - conf = {} - for registry, entry in six.iteritems(entries): - if not isinstance(entry, dict): - log.debug( - 'Config entry for key {0} is not auth config'.format(registry) - ) - # We sometimes fall back to parsing the whole config as if it was - # the auth config by itself, for legacy purposes. In that case, we - # fail silently and return an empty conf if any of the keys is not - # formatted properly. 
- if raise_on_error: - raise errors.InvalidConfigFile( - 'Invalid configuration for registry {0}'.format(registry) - ) - return {} - if 'identitytoken' in entry: - log.debug('Found an IdentityToken entry for registry {0}'.format( - registry - )) - conf[registry] = { - 'IdentityToken': entry['identitytoken'] - } - continue # Other values are irrelevant if we have a token, skip. - - if 'auth' not in entry: - # Starting with engine v1.11 (API 1.23), an empty dictionary is - # a valid value in the auths config. - # https://github.com/docker/compose/issues/3265 - log.debug( - 'Auth data for {0} is absent. Client might be using a ' - 'credentials store instead.'.format(registry) - ) - conf[registry] = {} - continue - - username, password = decode_auth(entry['auth']) - log.debug( - 'Found entry (registry={0}, username={1})' - .format(repr(registry), repr(username)) - ) + return AuthConfig.parse_auth(entries, raise_on_error) - conf[registry] = { - 'username': username, - 'password': password, - 'email': entry.get('email'), - 'serveraddress': registry, - } - return conf - -def load_config(config_path=None, config_dict=None): - """ - Loads authentication data from a Docker configuration file in the given - root directory or if config_path is passed use given path. - Lookup priority: - explicit config_path parameter > DOCKER_CONFIG environment variable > - ~/.docker/config.json > ~/.dockercfg - """ - - if not config_dict: - config_file = config.find_config_file(config_path) - - if not config_file: - return {} - try: - with open(config_file) as f: - config_dict = json.load(f) - except (IOError, KeyError, ValueError) as e: - # Likely missing new Docker config file or it's in an - # unknown format, continue to attempt to read old location - # and format. - log.debug(e) - return _load_legacy_config(config_file) - - res = {} - if config_dict.get('auths'): - log.debug("Found 'auths' section") - res.update({ - 'auths': parse_auth(config_dict.pop('auths'), raise_on_error=True) - }) - if config_dict.get('credsStore'): - log.debug("Found 'credsStore' section") - res.update({'credsStore': config_dict.pop('credsStore')}) - if config_dict.get('credHelpers'): - log.debug("Found 'credHelpers' section") - res.update({'credHelpers': config_dict.pop('credHelpers')}) - if res: - return res - - log.debug( - "Couldn't find auth-related section ; attempting to interpret " - "as auth-only file" - ) - return {'auths': parse_auth(config_dict)} +def load_config(config_path=None, config_dict=None, credstore_env=None): + return AuthConfig.load_config(config_path, config_dict, credstore_env) def _load_legacy_config(config_file): diff --git a/docker/errors.py b/docker/errors.py index 0253695a5f..c340dcb123 100644 --- a/docker/errors.py +++ b/docker/errors.py @@ -63,6 +63,9 @@ def status_code(self): if self.response is not None: return self.response.status_code + def is_error(self): + return self.is_client_error() or self.is_server_error() + def is_client_error(self): if self.status_code is None: return False diff --git a/docker/models/containers.py b/docker/models/containers.py index 9d6f2cc6af..10f667d706 100644 --- a/docker/models/containers.py +++ b/docker/models/containers.py @@ -144,7 +144,7 @@ def diff(self): def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False, privileged=False, user='', detach=False, stream=False, - socket=False, environment=None, workdir=None): + socket=False, environment=None, workdir=None, demux=False): """ Run a command inside this container. Similar to ``docker exec``. 
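Ahead of the docstring changes in the next hunk, a compact sketch of how the new `demux` flag changes the shape of `exec_run`'s output (image and commands are illustrative):

```python
import docker

client = docker.from_env()
container = client.containers.run('busybox', 'sleep 300', detach=True)

# With demux=True and stream=False, output is a (stdout, stderr) tuple of
# bytestrings instead of a single interleaved bytestring.
exit_code, (out, err) = container.exec_run(
    'sh -c "echo ok; echo oops >&2"', demux=True
)
assert exit_code == 0
assert out == b'ok\n' and err == b'oops\n'

container.remove(force=True)
```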
@@ -166,16 +166,17 @@ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
                 the following format ``["PASSWORD=xxx"]`` or
                 ``{"PASSWORD": "xxx"}``.
             workdir (str): Path to working directory for this exec session
+            demux (bool): Return stdout and stderr separately
 
         Returns:
             (ExecResult): A tuple of (exit_code, output)
                 exit_code: (int): Exit code for the executed command or
                     ``None`` if either ``stream`` or ``socket`` is ``True``.
-                output: (generator or str):
+                output: (generator or bytes):
                     If ``stream=True``, a generator yielding response chunks.
                     If ``socket=True``, a socket object for the connection.
-                    A string containing response data otherwise.
+                    A bytestring containing response data otherwise.
 
         Raises:
             :py:class:`docker.errors.APIError`
@@ -184,10 +185,11 @@ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
         resp = self.client.api.exec_create(
             self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
             privileged=privileged, user=user, environment=environment,
-            workdir=workdir
+            workdir=workdir,
         )
         exec_output = self.client.api.exec_start(
-            resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
+            resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
+            demux=demux
         )
         if socket or stream:
             return ExecResult(None, exec_output)
@@ -675,6 +677,7 @@ def run(self, image, command=None, stdout=True, stderr=False,
                 For example:
                 ``{"Name": "on-failure", "MaximumRetryCount": 5}``
 
+            runtime (str): Runtime to use with this container.
             security_opt (:py:class:`list`): A list of string values to
                 customize labels for MLS systems, such as SELinux.
             shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
@@ -706,6 +709,10 @@ def run(self, image, command=None, stdout=True, stderr=False,
             tty (bool): Allocate a pseudo-TTY.
             ulimits (:py:class:`list`): Ulimits to set inside the container,
                 as a list of :py:class:`docker.types.Ulimit` instances.
+            use_config_proxy (bool): If ``True``, and if the docker client
+                configuration file (``~/.docker/config.json`` by default)
+                contains a proxy configuration, the corresponding environment
+                variables will be set in the container being created.
             user (str or int): Username or UID to run commands as inside the
                 container.
             userns_mode (str): Sets the user namespace mode for the container
@@ -730,7 +737,6 @@ def run(self, image, command=None, stdout=True, stderr=False,
             volumes_from (:py:class:`list`): List of container names or IDs to
                 get volumes from.
             working_dir (str): Path to the working directory.
-            runtime (str): Runtime to use with this container.
 
         Returns:
             The container logs, either ``STDOUT``, ``STDERR``, or both,
@@ -945,6 +951,7 @@ def prune(self, filters=None):
     'stdin_open',
     'stop_signal',
     'tty',
+    'use_config_proxy',
     'user',
     'volume_driver',
     'working_dir',
diff --git a/docker/models/images.py b/docker/models/images.py
index 30e86f109e..af94520d9b 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -258,6 +258,10 @@ def build(self, **kwargs):
             platform (str): Platform in the format ``os[/arch[/variant]]``.
             isolation (str): Isolation technology used during build.
                 Default: `None`.
+            use_config_proxy (bool): If ``True``, and if the docker client
+                configuration file (``~/.docker/config.json`` by default)
+                contains a proxy configuration, the corresponding environment
+                variables will be set in the container being built.
Returns: (tuple): The first item is the :py:class:`Image` object for the diff --git a/docker/models/services.py b/docker/models/services.py index a2a3ed011f..5d2bd9b3ec 100644 --- a/docker/models/services.py +++ b/docker/models/services.py @@ -153,10 +153,12 @@ def create(self, image, command=None, **kwargs): image (str): The image name to use for the containers. command (list of str or str): Command to run. args (list of str): Arguments to the command. - constraints (list of str): Placement constraints. - preferences (list of str): Placement preferences. - platforms (list of tuple): A list of platforms constraints - expressed as ``(arch, os)`` tuples + constraints (list of str): :py:class:`~docker.types.Placement` + constraints. + preferences (list of tuple): :py:class:`~docker.types.Placement` + preferences. + platforms (list of tuple): A list of platform constraints + expressed as ``(arch, os)`` tuples. container_labels (dict): Labels to apply to the container. endpoint_spec (EndpointSpec): Properties that can be configured to access and load balance a service. Default: ``None``. diff --git a/docker/models/swarm.py b/docker/models/swarm.py index 7396e730d7..3a02ae3707 100644 --- a/docker/models/swarm.py +++ b/docker/models/swarm.py @@ -112,6 +112,7 @@ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377', init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs) self.client.api.init_swarm(**init_kwargs) self.reload() + return True def join(self, *args, **kwargs): return self.client.api.join_swarm(*args, **kwargs) diff --git a/docker/types/__init__.py b/docker/types/__init__.py index 64512333df..f3cac1bc17 100644 --- a/docker/types/__init__.py +++ b/docker/types/__init__.py @@ -5,7 +5,8 @@ from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig from .services import ( ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec, - Mount, Placement, Privileges, Resources, RestartPolicy, RollbackConfig, - SecretReference, ServiceMode, TaskTemplate, UpdateConfig + Mount, Placement, PlacementPreference, Privileges, Resources, + RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate, + UpdateConfig ) from .swarm import SwarmSpec, SwarmExternalCA diff --git a/docker/types/containers.py b/docker/types/containers.py index d040c0fb5e..fd8cab4979 100644 --- a/docker/types/containers.py +++ b/docker/types/containers.py @@ -51,8 +51,7 @@ class LogConfig(DictType): ... host_config=hc) >>> client.inspect_container(container)['HostConfig']['LogConfig'] {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}} - - """ # flake8: noqa + """ # noqa: E501 types = LogConfigTypesEnum def __init__(self, **kwargs): @@ -320,10 +319,10 @@ def __init__(self, version, binds=None, port_bindings=None, if not isinstance(ulimits, list): raise host_config_type_error('ulimits', ulimits, 'list') self['Ulimits'] = [] - for l in ulimits: - if not isinstance(l, Ulimit): - l = Ulimit(**l) - self['Ulimits'].append(l) + for lmt in ulimits: + if not isinstance(lmt, Ulimit): + lmt = Ulimit(**lmt) + self['Ulimits'].append(lmt) if log_config is not None: if not isinstance(log_config, LogConfig): diff --git a/docker/types/daemon.py b/docker/types/daemon.py index 700f9a90c4..af3e5bcb5e 100644 --- a/docker/types/daemon.py +++ b/docker/types/daemon.py @@ -15,7 +15,7 @@ class CancellableStream(object): Example: >>> events = client.events() >>> for event in events: - ... print event + ... 
print(event)
     >>> # and cancel from another thread
     >>> events.close()
     """
diff --git a/docker/types/services.py b/docker/types/services.py
index c66d41a167..ac1c181a90 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -648,18 +648,24 @@ class Placement(dict):
         Placement constraints to be used as part of a :py:class:`TaskTemplate`
 
         Args:
-        constraints (:py:class:`list`): A list of constraints
-        preferences (:py:class:`list`): Preferences provide a way to make
-            the scheduler aware of factors such as topology. They are
-            provided in order from highest to lowest precedence.
-        platforms (:py:class:`list`): A list of platforms expressed as
-            ``(arch, os)`` tuples
+        constraints (:py:class:`list` of str): A list of constraints
+        preferences (:py:class:`list` of tuple): Preferences provide a way
+            to make the scheduler aware of factors such as topology. They
+            are provided in order from highest to lowest precedence and
+            are expressed as ``(strategy, descriptor)`` tuples. See
+            :py:class:`PlacementPreference` for details.
+        platforms (:py:class:`list` of tuple): A list of platforms
+            expressed as ``(arch, os)`` tuples
     """
     def __init__(self, constraints=None, preferences=None, platforms=None):
         if constraints is not None:
             self['Constraints'] = constraints
         if preferences is not None:
-            self['Preferences'] = preferences
+            self['Preferences'] = []
+            for pref in preferences:
+                if isinstance(pref, tuple):
+                    pref = PlacementPreference(*pref)
+                self['Preferences'].append(pref)
         if platforms:
             self['Platforms'] = []
             for plat in platforms:
@@ -668,6 +674,27 @@ def __init__(self, constraints=None, preferences=None, platforms=None):
             })
 
 
+class PlacementPreference(dict):
+    """
+    Placement preference to be used as an element in the list of
+    preferences for :py:class:`Placement` objects.
+
+    Args:
+        strategy (string): The placement strategy to implement. Currently,
+            the only supported strategy is ``spread``.
+        descriptor (string): A label descriptor. For the spread strategy,
+            the scheduler will try to spread tasks evenly over groups of
+            nodes identified by this label.
+    """
+    def __init__(self, strategy, descriptor):
+        if strategy != 'spread':
+            raise errors.InvalidArgument(
+                'PlacementPreference strategy value is invalid ({}):'
+                ' must be "spread".'.format(strategy)
+            )
+        self['SpreadOver'] = descriptor
+
+
 class DNSConfig(dict):
     """
     Specification for DNS related configurations in resolver configuration
@@ -687,7 +714,7 @@ def __init__(self, nameservers=None, search=None, options=None):
 
 
 class Privileges(dict):
-    """
+    r"""
     Security options for a service's containers.
     Part of a :py:class:`ContainerSpec` definition.
diff --git a/docker/utils/ports.py b/docker/utils/ports.py
index bf7d697271..cf5987c94f 100644
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -3,10 +3,10 @@
 PORT_SPEC = re.compile(
     "^"  # Match full string
     "("  # External part
-    "((?P<host>[a-fA-F\d.:]+):)?"  # Address
-    "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
+    r"((?P<host>[a-fA-F\d.:]+):)?"  # Address
+    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
     ")?"
-    "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
+    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
     "(?P<proto>/(udp|tcp))?"
# Protocol "$" # Match full string ) diff --git a/docker/utils/proxy.py b/docker/utils/proxy.py new file mode 100644 index 0000000000..49e98ed912 --- /dev/null +++ b/docker/utils/proxy.py @@ -0,0 +1,73 @@ +from .utils import format_environment + + +class ProxyConfig(dict): + ''' + Hold the client's proxy configuration + ''' + @property + def http(self): + return self.get('http') + + @property + def https(self): + return self.get('https') + + @property + def ftp(self): + return self.get('ftp') + + @property + def no_proxy(self): + return self.get('no_proxy') + + @staticmethod + def from_dict(config): + ''' + Instantiate a new ProxyConfig from a dictionary that represents a + client configuration, as described in `the documentation`_. + + .. _the documentation: + https://docs.docker.com/network/proxy/#configure-the-docker-client + ''' + return ProxyConfig( + http=config.get('httpProxy'), + https=config.get('httpsProxy'), + ftp=config.get('ftpProxy'), + no_proxy=config.get('noProxy'), + ) + + def get_environment(self): + ''' + Return a dictionary representing the environment variables used to + set the proxy settings. + ''' + env = {} + if self.http: + env['http_proxy'] = env['HTTP_PROXY'] = self.http + if self.https: + env['https_proxy'] = env['HTTPS_PROXY'] = self.https + if self.ftp: + env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp + if self.no_proxy: + env['no_proxy'] = env['NO_PROXY'] = self.no_proxy + return env + + def inject_proxy_environment(self, environment): + ''' + Given a list of strings representing environment variables, prepend the + environment variables corresponding to the proxy settings. + ''' + if not self: + return environment + + proxy_env = format_environment(self.get_environment()) + if not environment: + return proxy_env + # It is important to prepend our variables, because we want the + # variables defined in "environment" to take precedence. + return proxy_env + environment + + def __str__(self): + return 'ProxyConfig(http={}, https={}, ftp={}, no_proxy={})'.format( + self.http, self.https, self.ftp, self.no_proxy) diff --git a/docker/utils/socket.py b/docker/utils/socket.py index 7b96d4fce6..7ba9505538 100644 --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -12,6 +12,10 @@ NpipeSocket = type(None) +STDOUT = 1 +STDERR = 2 + + class SocketError(Exception): pass @@ -51,28 +55,43 @@ def read_exactly(socket, n): return data -def next_frame_size(socket): +def next_frame_header(socket): """ - Returns the size of the next frame of data waiting to be read from socket, - according to the protocol defined here: + Returns the stream and size of the next frame of data waiting to be read + from socket, according to the protocol defined here: - https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container + https://docs.docker.com/engine/api/v1.24/#attach-to-a-container """ try: data = read_exactly(socket, 8) except SocketError: - return -1 + return (-1, -1) + + stream, actual = struct.unpack('>BxxxL', data) + return (stream, actual) + - _, actual = struct.unpack('>BxxxL', data) - return actual +def frames_iter(socket, tty): + """ + Return a generator of frames read from socket. A frame is a tuple where + the first item is the stream number and the second item is a chunk of data. + + If the tty setting is enabled, the streams are multiplexed into the stdout + stream. 
+ """ + if tty: + return ((STDOUT, frame) for frame in frames_iter_tty(socket)) + else: + return frames_iter_no_tty(socket) -def frames_iter(socket): +def frames_iter_no_tty(socket): """ - Returns a generator of frames read from socket + Returns a generator of data read from the socket when the tty setting is + not enabled. """ while True: - n = next_frame_size(socket) + (stream, n) = next_frame_header(socket) if n < 0: break while n > 0: @@ -84,13 +103,13 @@ def frames_iter(socket): # We have reached EOF return n -= data_length - yield result + yield (stream, result) -def socket_raw_iter(socket): +def frames_iter_tty(socket): """ - Returns a generator of data read from the socket. - This is used for non-multiplexed streams. + Return a generator of data read from the socket when the tty setting is + enabled. """ while True: result = read(socket) @@ -98,3 +117,53 @@ def socket_raw_iter(socket): # We have reached EOF return yield result + + +def consume_socket_output(frames, demux=False): + """ + Iterate through frames read from the socket and return the result. + + Args: + + demux (bool): + If False, stdout and stderr are multiplexed, and the result is the + concatenation of all the frames. If True, the streams are + demultiplexed, and the result is a 2-tuple where each item is the + concatenation of frames belonging to the same stream. + """ + if demux is False: + # If the streams are multiplexed, the generator returns strings, that + # we just need to concatenate. + return six.binary_type().join(frames) + + # If the streams are demultiplexed, the generator yields tuples + # (stdout, stderr) + out = [None, None] + for frame in frames: + # It is guaranteed that for each frame, one and only one stream + # is not None. + assert frame != (None, None) + if frame[0] is not None: + if out[0] is None: + out[0] = frame[0] + else: + out[0] += frame[0] + else: + if out[1] is None: + out[1] = frame[1] + else: + out[1] += frame[1] + return tuple(out) + + +def demux_adaptor(stream_id, data): + """ + Utility to demultiplex stdout and stderr when reading frames from the + socket. + """ + if stream_id == STDOUT: + return (data, None) + elif stream_id == STDERR: + return (None, data) + else: + raise ValueError('{0} is not a valid stream'.format(stream_id)) diff --git a/docker/version.py b/docker/version.py index 0b27a263a6..c3edb8a35e 100644 --- a/docker/version.py +++ b/docker/version.py @@ -1,2 +1,2 @@ -version = "3.6.0" +version = "3.7.0" version_info = tuple([int(d) for d in version.split("-")[0].split(".")]) diff --git a/docs/api.rst b/docs/api.rst index 1682128951..edb8fffadc 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -143,6 +143,7 @@ Configuration types .. autoclass:: LogConfig .. autoclass:: Mount .. autoclass:: Placement +.. autoclass:: PlacementPreference .. autoclass:: Privileges .. autoclass:: Resources .. autoclass:: RestartPolicy diff --git a/docs/change-log.md b/docs/change-log.md index 873db8cef5..008a2ad270 100644 --- a/docs/change-log.md +++ b/docs/change-log.md @@ -1,6 +1,28 @@ Change log ========== +3.7.0 +----- + +[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/56?closed=1) + +### Features + +* Added support for multiplexed streams (for `attach` and `exec_start`). 
Learn
+  more at https://docker-py.readthedocs.io/en/stable/user_guides/multiplex.html
+* Added the `use_config_proxy` parameter to the following methods:
+  `APIClient.build`, `APIClient.create_container`, `DockerClient.images.build`
+  and `DockerClient.containers.run` (`False` by default). **This parameter**
+  **will become `True` by default in the 4.0.0 release.**
+* Placement preferences for Swarm services are better validated on the client,
+  and documentation has been updated accordingly.
+
+### Bugfixes
+
+* Fixed a bug where credential stores weren't queried for relevant registry
+  credentials with certain variations of the `config.json` file.
+* `DockerClient.swarm.init` now returns a boolean value as advertised.
+
 3.6.0
 -----
 
diff --git a/docs/conf.py b/docs/conf.py
index 3e17678a83..f46d1f76ea 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -69,10 +69,12 @@
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
-# The short X.Y version.
-version = u'2.0'
+with open('../docker/version.py', 'r') as vfile:
+    exec(vfile.read())
 # The full version, including alpha/beta/rc tags.
-release = u'2.0'
+release = version
+# The short X.Y version.
+version = '{}.{}'.format(version_info[0], version_info[1])
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/docs/index.rst b/docs/index.rst
index 39426b6819..63e85d3635 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -92,4 +92,5 @@ That's just a taste of what you can do with the Docker SDK for Python. For more,
   volumes
   api
   tls
+  user_guides/index
   change-log
diff --git a/docs/user_guides/index.rst b/docs/user_guides/index.rst
new file mode 100644
index 0000000000..79b3a909e3
--- /dev/null
+++ b/docs/user_guides/index.rst
@@ -0,0 +1,8 @@
+User guides and tutorials
+=========================
+
+.. toctree::
+   :maxdepth: 2
+
+   multiplex
+   swarm_services
\ No newline at end of file
diff --git a/docs/user_guides/multiplex.rst b/docs/user_guides/multiplex.rst
new file mode 100644
index 0000000000..78d7e3728d
--- /dev/null
+++ b/docs/user_guides/multiplex.rst
@@ -0,0 +1,66 @@
+Handling multiplexed streams
+============================
+
+.. note::
+   The following instructions assume you're interested in getting output from
+   an ``exec`` command. These instructions are similarly applicable to the
+   output of ``attach``.
+
+First, create a container that runs in the background:
+
+>>> client = docker.from_env()
+>>> container = client.containers.run(
+...     'bfirsh/reticulate-splines', detach=True)
+
+Prepare the command we are going to use. It prints "hello stdout"
+in `stdout`, followed by "hello stderr" in `stderr`:
+
+>>> cmd = '/bin/sh -c "echo hello stdout ; echo hello stderr >&2"'
+
+We'll run this command with all four combinations of ``stream``
+and ``demux``.
+
+With ``stream=False`` and ``demux=False``, the output is a string
+that contains both the `stdout` and the `stderr` output:
+
+>>> res = container.exec_run(cmd, stream=False, demux=False)
+>>> res.output
+b'hello stderr\nhello stdout\n'
+
+With ``stream=True`` and ``demux=False``, the output is a
+generator that yields strings containing the output of both
+`stdout` and `stderr`:
+
+>>> res = container.exec_run(cmd, stream=True, demux=False)
+>>> next(res.output)
+b'hello stdout\n'
+>>> next(res.output)
+b'hello stderr\n'
+>>> next(res.output)
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+StopIteration
+
+With ``stream=True`` and ``demux=True``, the generator now
+separates the streams, and yields tuples
+``(stdout, stderr)``:
+
+>>> res = container.exec_run(cmd, stream=True, demux=True)
+>>> next(res.output)
+(b'hello stdout\n', None)
+>>> next(res.output)
+(None, b'hello stderr\n')
+>>> next(res.output)
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+StopIteration
+
+Finally, with ``stream=False`` and ``demux=True``, the whole output
+is returned, but the streams are still separated:
+
+>>> res = container.exec_run(cmd, stream=False, demux=True)
+>>> res.output
+(b'hello stdout\n', b'hello stderr\n')
diff --git a/docs/user_guides/swarm_services.md b/docs/user_guides/swarm_services.md
index 9bd4dca3fb..369fbed00e 100644
--- a/docs/user_guides/swarm_services.md
+++ b/docs/user_guides/swarm_services.md
@@ -1,5 +1,9 @@
 # Swarm services
 
+> Warning:
+> This is a stale document and may contain outdated information.
+> Refer to the API docs for updated classes and method signatures.
+
 Starting with Engine version 1.12 (API 1.24), it is possible to manage services
 using the Docker Engine API. Note that the engine needs to be part of a
 [Swarm cluster](../swarm.rst) before you can use the service-related methods.
diff --git a/requirements.txt b/requirements.txt
index d13e9d6cad..f1c9bdbc76 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,7 +4,7 @@ backports.ssl-match-hostname==3.5.0.1
 cffi==1.10.0
 cryptography==1.9; python_version == '3.3'
 cryptography==2.3; python_version > '3.3'
-docker-pycreds==0.3.0
+docker-pycreds==0.4.0
 enum34==1.1.6
 idna==2.5
 ipaddress==1.0.18
diff --git a/scripts/release.sh b/scripts/release.sh
index 5b37b6d083..d9e7a055a1 100755
--- a/scripts/release.sh
+++ b/scripts/release.sh
@@ -3,12 +3,6 @@
 # Create the official release
 #
 
-if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
-    >&2 echo "$0 requires http://pandoc.org/"
-    >&2 echo "Please install it and make sure it is available on your \$PATH."
- exit 2 -fi - VERSION=$1 REPO=docker/docker-py GITHUB_REPO=git@github.com:$REPO @@ -18,8 +12,9 @@ if [ -z $VERSION ]; then exit 1 fi -echo "##> Removing stale build files" -rm -rf ./build || exit 1 +echo "##> Removing stale build files and other untracked files" +git clean -x -d -i +test -z "$(git clean -x -d -n)" || exit 1 echo "##> Tagging the release as $VERSION" git tag $VERSION @@ -37,11 +32,10 @@ if [[ $2 == 'upload' ]]; then fi -pandoc -f markdown -t rst README.md -o README.rst || exit 1 echo "##> sdist & wheel" python setup.py sdist bdist_wheel if [[ $2 == 'upload' ]]; then echo '##> Uploading sdist to pypi' twine upload dist/docker-$VERSION* -fi \ No newline at end of file +fi diff --git a/setup.py b/setup.py index 3ad572b3ec..94fbdf444e 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ requirements = [ 'six >= 1.4.0', 'websocket-client >= 0.32.0', - 'docker-pycreds >= 0.3.0', + 'docker-pycreds >= 0.4.0', 'requests >= 2.14.2, != 2.18.0', ] @@ -55,24 +55,27 @@ long_description = '' -try: - with codecs.open('./README.rst', encoding='utf-8') as readme_rst: - long_description = readme_rst.read() -except IOError: - # README.rst is only generated on release. Its absence should not prevent - # setup.py from working properly. - pass +with codecs.open('./README.md', encoding='utf-8') as readme_md: + long_description = readme_md.read() setup( name="docker", version=version, description="A Python library for the Docker Engine API.", long_description=long_description, + long_description_content_type='text/markdown', url='https://github.com/docker/docker-py', + project_urls={ + 'Documentation': 'https://docker-py.readthedocs.io', + 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501 + 'Source': 'https://github.com/docker/docker-py', + 'Tracker': 'https://github.com/docker/docker-py/issues', + }, packages=find_packages(exclude=["tests.*", "tests"]), install_requires=requirements, tests_require=test_requirements, extras_require=extras_require, + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*', zip_safe=False, test_suite='tests', classifiers=[ @@ -89,6 +92,7 @@ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Topic :: Software Development', 'Topic :: Utilities', 'License :: OSI Approved :: Apache Software License', ], diff --git a/test-requirements.txt b/test-requirements.txt index 07e1a900db..510fa295ec 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,7 +1,9 @@ -coverage==3.7.1 -flake8==3.4.1 +coverage==4.5.2 +flake8==3.6.0; python_version != '3.3' +flake8==3.4.1; python_version == '3.3' mock==1.0.1 pytest==2.9.1; python_version == '3.3' -pytest==3.6.3; python_version > '3.3' -pytest-cov==2.1.0 +pytest==4.1.0; python_version != '3.3' +pytest-cov==2.6.1; python_version != '3.3' +pytest-cov==2.5.1; python_version == '3.3' pytest-timeout==1.3.3 diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py index bad411beec..8bfc7960fc 100644 --- a/tests/integration/api_build_test.py +++ b/tests/integration/api_build_test.py @@ -4,6 +4,7 @@ import tempfile from docker import errors +from docker.utils.proxy import ProxyConfig import pytest import six @@ -13,6 +14,48 @@ class BuildTest(BaseAPIIntegrationTest): + def test_build_with_proxy(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=a"', + 'RUN env | 
grep "ftp_proxy=a"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build(fileobj=script, decode=True) + + def test_build_with_proxy_and_buildargs(self): + self.client._proxy_configs = ProxyConfig( + ftp='a', http='b', https='c', no_proxy='d' + ) + + script = io.BytesIO('\n'.join([ + 'FROM busybox', + 'RUN env | grep "FTP_PROXY=XXX"', + 'RUN env | grep "ftp_proxy=xxx"', + 'RUN env | grep "HTTP_PROXY=b"', + 'RUN env | grep "http_proxy=b"', + 'RUN env | grep "HTTPS_PROXY=c"', + 'RUN env | grep "https_proxy=c"', + 'RUN env | grep "NO_PROXY=d"', + 'RUN env | grep "no_proxy=d"', + ]).encode('ascii')) + + self.client.build( + fileobj=script, + decode=True, + buildargs={'FTP_PROXY': 'XXX', 'ftp_proxy': 'xxx'} + ) + def test_build_streaming(self): script = io.BytesIO('\n'.join([ 'FROM busybox', diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py index 905e06484d..9e348f3e3f 100644 --- a/tests/integration/api_client_test.py +++ b/tests/integration/api_client_test.py @@ -47,7 +47,7 @@ def test_timeout(self): # This call isn't supposed to complete, and it should fail fast. try: res = self.client.inspect_container('id') - except: + except: # noqa: E722 pass end = time.time() assert res is None diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py index 02f3603374..83df3424a9 100644 --- a/tests/integration/api_container_test.py +++ b/tests/integration/api_container_test.py @@ -7,7 +7,7 @@ import docker from docker.constants import IS_WINDOWS_PLATFORM -from docker.utils.socket import next_frame_size +from docker.utils.socket import next_frame_header from docker.utils.socket import read_exactly import pytest @@ -1242,7 +1242,8 @@ def test_run_container_reading_socket(self): self.client.start(container) - next_size = next_frame_size(pty_stdout) + (stream, next_size) = next_frame_header(pty_stdout) + assert stream == 1 # correspond to stdout assert next_size == len(line) data = read_exactly(pty_stdout, next_size) assert data.decode('utf-8') == line diff --git a/tests/integration/api_exec_test.py b/tests/integration/api_exec_test.py index 1a5a4e5472..e6079eb337 100644 --- a/tests/integration/api_exec_test.py +++ b/tests/integration/api_exec_test.py @@ -1,5 +1,6 @@ -from docker.utils.socket import next_frame_size +from docker.utils.socket import next_frame_header from docker.utils.socket import read_exactly +from docker.utils.proxy import ProxyConfig from .base import BaseAPIIntegrationTest, BUSYBOX from ..helpers import ( @@ -8,6 +9,45 @@ class ExecTest(BaseAPIIntegrationTest): + def test_execute_command_with_proxy_env(self): + # Set a custom proxy config on the client + self.client._proxy_configs = ProxyConfig( + ftp='a', https='b', http='c', no_proxy='d' + ) + + container = self.client.create_container( + BUSYBOX, 'cat', detach=True, stdin_open=True, + use_config_proxy=True, + ) + self.client.start(container) + self.tmp_containers.append(container) + + cmd = 'sh -c "env | grep -i proxy"' + + # First, just make sure the environment variables from the custom + # config are set + + res = self.client.exec_create(container, cmd=cmd) + output = self.client.exec_start(res).decode('utf-8').split('\n') + expected = [ + 'ftp_proxy=a', 'https_proxy=b', 'http_proxy=c', 'no_proxy=d', + 'FTP_PROXY=a', 'HTTPS_PROXY=b', 'HTTP_PROXY=c', 'NO_PROXY=d' 
+        ]
+        for item in expected:
+            assert item in output
+
+        # Overwrite some variables with a custom environment
+        env = {'https_proxy': 'xxx', 'HTTPS_PROXY': 'XXX'}
+
+        res = self.client.exec_create(container, cmd=cmd, environment=env)
+        output = self.client.exec_start(res).decode('utf-8').split('\n')
+        expected = [
+            'ftp_proxy=a', 'https_proxy=xxx', 'http_proxy=c', 'no_proxy=d',
+            'FTP_PROXY=a', 'HTTPS_PROXY=XXX', 'HTTP_PROXY=c', 'NO_PROXY=d'
+        ]
+        for item in expected:
+            assert item in output
+
     def test_execute_command(self):
         container = self.client.create_container(BUSYBOX, 'cat',
                                                  detach=True, stdin_open=True)
@@ -75,6 +115,75 @@ def test_exec_command_streaming(self):
             res += chunk
         assert res == b'hello\nworld\n'
 
+    def test_exec_command_demux(self):
+        container = self.client.create_container(
+            BUSYBOX, 'cat', detach=True, stdin_open=True)
+        id = container['Id']
+        self.client.start(id)
+        self.tmp_containers.append(id)
+
+        script = ' ; '.join([
+            # Write something on stdout
+            'echo hello out',
+            # Busybox's sleep does not handle sub-second times.
+            # This loop takes ~0.3 seconds to execute on my machine.
+            'for i in $(seq 1 50000); do echo $i>/dev/null; done',
+            # Write something on stderr
+            'echo hello err >&2'])
+        cmd = 'sh -c "{}"'.format(script)
+
+        # tty=False, stream=False, demux=False
+        res = self.client.exec_create(id, cmd)
+        exec_log = self.client.exec_start(res)
+        assert exec_log == b'hello out\nhello err\n'
+
+        # tty=False, stream=True, demux=False
+        res = self.client.exec_create(id, cmd)
+        exec_log = self.client.exec_start(res, stream=True)
+        assert next(exec_log) == b'hello out\n'
+        assert next(exec_log) == b'hello err\n'
+        with self.assertRaises(StopIteration):
+            next(exec_log)
+
+        # tty=False, stream=False, demux=True
+        res = self.client.exec_create(id, cmd)
+        exec_log = self.client.exec_start(res, demux=True)
+        assert exec_log == (b'hello out\n', b'hello err\n')
+
+        # tty=False, stream=True, demux=True
+        res = self.client.exec_create(id, cmd)
+        exec_log = self.client.exec_start(res, demux=True, stream=True)
+        assert next(exec_log) == (b'hello out\n', None)
+        assert next(exec_log) == (None, b'hello err\n')
+        with self.assertRaises(StopIteration):
+            next(exec_log)
+
+        # tty=True, stream=False, demux=False
+        res = self.client.exec_create(id, cmd, tty=True)
+        exec_log = self.client.exec_start(res)
+        assert exec_log == b'hello out\r\nhello err\r\n'
+
+        # tty=True, stream=True, demux=False
+        res = self.client.exec_create(id, cmd, tty=True)
+        exec_log = self.client.exec_start(res, stream=True)
+        assert next(exec_log) == b'hello out\r\n'
+        assert next(exec_log) == b'hello err\r\n'
+        with self.assertRaises(StopIteration):
+            next(exec_log)
+
+        # tty=True, stream=False, demux=True
+        res = self.client.exec_create(id, cmd, tty=True)
+        exec_log = self.client.exec_start(res, demux=True)
+        assert exec_log == (b'hello out\r\nhello err\r\n', None)
+
+        # tty=True, stream=True, demux=True
+        res = self.client.exec_create(id, cmd, tty=True)
+        exec_log = self.client.exec_start(res, demux=True, stream=True)
+        assert next(exec_log) == (b'hello out\r\n', None)
+        assert next(exec_log) == (b'hello err\r\n', None)
+        with self.assertRaises(StopIteration):
+            next(exec_log)
+
     def test_exec_start_socket(self):
         container = self.client.create_container(BUSYBOX, 'cat',
                                                  detach=True, stdin_open=True)
@@ -91,7 +200,8 @@ def test_exec_start_socket(self):
         socket = self.client.exec_start(exec_id, socket=True)
         self.addCleanup(socket.close)
 
-        next_size = next_frame_size(socket)
+        (stream, next_size) = next_frame_header(socket)
+        assert stream == 1  # stdout (0 = stdin, 1 = stdout, 2 = stderr)
         assert next_size == len(line)
         data = read_exactly(socket, next_size)
         assert data.decode('utf-8') == line
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index b48f6fb6ce..92eca36d1a 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -163,6 +163,19 @@ def test_run_with_streamed_logs_and_cancel(self):
         assert logs[0] == b'hello\n'
         assert logs[1] == b'world\n'
 
+    def test_run_with_proxy_config(self):
+        client = docker.from_env(version=TEST_API_VERSION)
+        client.api._proxy_configs = docker.utils.proxy.ProxyConfig(
+            ftp='sakuya.jp:4967'
+        )
+
+        out = client.containers.run(
+            'alpine', 'sh -c "env"', use_config_proxy=True
+        )
+
+        assert b'FTP_PROXY=sakuya.jp:4967\n' in out
+        assert b'ftp_proxy=sakuya.jp:4967\n' in out
+
     def test_get(self):
         client = docker.from_env(version=TEST_API_VERSION)
         container = client.containers.run("alpine", "sleep 300", detach=True)
diff --git a/tests/integration/regression_test.py b/tests/integration/regression_test.py
index 0fd4e43104..9aab076e30 100644
--- a/tests/integration/regression_test.py
+++ b/tests/integration/regression_test.py
@@ -14,7 +14,7 @@ def test_443_handle_nonchunked_response_in_stream(self):
         with pytest.raises(docker.errors.APIError) as exc:
             for line in self.client.build(fileobj=dfile, tag="a/b/c"):
                 pass
-        assert exc.value.response.status_code == 500
+        assert exc.value.is_error()
         dfile.close()
 
     def test_542_truncate_ids_client_side(self):
diff --git a/tests/unit/api_build_test.py b/tests/unit/api_build_test.py
index 59470caa5f..7e07a2695e 100644
--- a/tests/unit/api_build_test.py
+++ b/tests/unit/api_build_test.py
@@ -65,7 +65,7 @@ def test_build_container_custom_context_gzip(self):
         )
 
     def test_build_remote_with_registry_auth(self):
-        self.client._auth_configs = {
+        self.client._auth_configs = auth.AuthConfig({
             'auths': {
                 'https://example.com': {
                     'user': 'example',
@@ -73,7 +73,7 @@ def test_build_remote_with_registry_auth(self):
                     'email': 'example@example.com'
                 }
             }
-        }
+        })
 
         expected_params = {'t': None, 'q': False, 'dockerfile': None,
                            'rm': False, 'nocache': False, 'pull': False,
@@ -81,7 +81,7 @@ def test_build_remote_with_registry_auth(self):
                            'remote': 'https://github.com/docker-library/mongo'}
         expected_headers = {
             'X-Registry-Config': auth.encode_header(
-                self.client._auth_configs['auths']
+                self.client._auth_configs.auths
             )
         }
 
@@ -115,7 +115,7 @@ def test_build_container_invalid_container_limits(self):
             })
 
     def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
-        self.client._auth_configs = {
+        self.client._auth_configs = auth.AuthConfig({
             'auths': {
                 'https://example.com': {
                     'user': 'example',
@@ -123,12 +123,12 @@ def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
                     'email': 'example@example.com'
                 }
             }
-        }
+        })
 
         headers = {}
         expected_headers = {
             'X-Registry-Config': auth.encode_header(
-                self.client._auth_configs['auths']
+                self.client._auth_configs.auths
             )
         }
 
@@ -136,7 +136,7 @@ def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
         assert headers == expected_headers
 
     def test_set_auth_headers_with_dict_and_auth_configs(self):
-        self.client._auth_configs = {
+        self.client._auth_configs = auth.AuthConfig({
             'auths': {
                 'https://example.com': {
                     'user': 'example',
@@ -144,12 +144,12 @@ def test_set_auth_headers_with_dict_and_auth_configs(self):
                     'email': 'example@example.com'
                 }
             }
-        }
+        })
 
         headers = {'foo': 'bar'}
         expected_headers = {
             'X-Registry-Config': auth.encode_header(
-                self.client._auth_configs['auths']
+                self.client._auth_configs.auths
             ),
             'foo': 'bar'
         }
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index af2bb1c202..f4d220a2c6 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -15,6 +15,7 @@
 import requests
 from requests.packages import urllib3
 import six
+import struct
 
 from . import fake_api
@@ -83,7 +84,7 @@ def fake_delete(self, url, *args, **kwargs):
     return fake_request('DELETE', url, *args, **kwargs)
 
 
-def fake_read_from_socket(self, response, stream, tty=False):
+def fake_read_from_socket(self, response, stream, tty=False, demux=False):
     return six.binary_type()
 
@@ -105,8 +106,6 @@ def setUp(self):
         )
         self.patcher.start()
         self.client = APIClient()
-        # Force-clear authconfig to avoid tampering with the tests
-        self.client._cfg = {'Configs': {}}
 
     def tearDown(self):
         self.client.close()
@@ -221,13 +220,11 @@ def test_login(self):
             'username': 'sakuya', 'password': 'izayoi'
         }
         assert args[1]['headers'] == {'Content-Type': 'application/json'}
-        assert self.client._auth_configs['auths'] == {
-            'docker.io': {
-                'email': None,
-                'password': 'izayoi',
-                'username': 'sakuya',
-                'serveraddress': None,
-            }
+        assert self.client._auth_configs.auths['docker.io'] == {
+            'email': None,
+            'password': 'izayoi',
+            'username': 'sakuya',
+            'serveraddress': None,
         }
 
     def test_events(self):
@@ -467,56 +464,124 @@ def test_early_stream_response(self):
 
 
 class TCPSocketStreamTest(unittest.TestCase):
-    text_data = b'''
+    stdout_data = b'''
    Now, those children out there, they're jumping through the flames in
    the hope that the god of the fire will make them fruitful. Really,
    you can't blame them. After all, what girl would not prefer the child
    of a god to that of some acne-scarred artisan?
    '''
+    stderr_data = b'''
+    And what of the true God? To whose glory churches and monasteries have been
+    built on these islands for generations past? Now shall what of Him?
+    '''
 
-    def setUp(self):
-
-        self.server = six.moves.socketserver.ThreadingTCPServer(
-            ('', 0), self.get_handler_class()
-        )
-        self.thread = threading.Thread(target=self.server.serve_forever)
-        self.thread.setDaemon(True)
-        self.thread.start()
-        self.address = 'http://{}:{}'.format(
-            socket.gethostname(), self.server.server_address[1]
-        )
-
-    def tearDown(self):
-        self.server.shutdown()
-        self.server.server_close()
-        self.thread.join()
-
-    def get_handler_class(self):
-        text_data = self.text_data
+    @classmethod
+    def setup_class(cls):
+        cls.server = six.moves.socketserver.ThreadingTCPServer(
+            ('', 0), cls.get_handler_class())
+        cls.thread = threading.Thread(target=cls.server.serve_forever)
+        cls.thread.setDaemon(True)
+        cls.thread.start()
+        cls.address = 'http://{}:{}'.format(
+            socket.gethostname(), cls.server.server_address[1])
+
+    @classmethod
+    def teardown_class(cls):
+        cls.server.shutdown()
+        cls.server.server_close()
+        cls.thread.join()
+
+    @classmethod
+    def get_handler_class(cls):
+        stdout_data = cls.stdout_data
+        stderr_data = cls.stderr_data
 
         class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
             def do_POST(self):
+                resp_data = self.get_resp_data()
                 self.send_response(101)
                 self.send_header(
-                    'Content-Type', 'application/vnd.docker.raw-stream'
-                )
+                    'Content-Type', 'application/vnd.docker.raw-stream')
                 self.send_header('Connection', 'Upgrade')
                 self.send_header('Upgrade', 'tcp')
                 self.end_headers()
                 self.wfile.flush()
                 time.sleep(0.2)
-                self.wfile.write(text_data)
+                self.wfile.write(resp_data)
                 self.wfile.flush()
 
+            def get_resp_data(self):
+                path = self.path.split('/')[-1]
+                if path == 'tty':
+                    return stdout_data + stderr_data
+                elif path == 'no-tty':
+                    data = b''
+                    data += self.frame_header(1, stdout_data)
+                    data += stdout_data
+                    data += self.frame_header(2, stderr_data)
+                    data += stderr_data
+                    return data
+                else:
+                    raise Exception('Unknown path {0}'.format(path))
+
+            @staticmethod
+            def frame_header(stream, data):
+                return struct.pack('>BxxxL', stream, len(data))
+
         return Handler
 
-    def test_read_from_socket(self):
+    def request(self, stream=None, tty=None, demux=None):
+        assert stream is not None and tty is not None and demux is not None
         with APIClient(base_url=self.address) as client:
-            resp = client._post(client._url('/dummy'), stream=True)
-            data = client._read_from_socket(resp, stream=True, tty=True)
-            results = b''.join(data)
-
-        assert results == self.text_data
+            if tty:
+                url = client._url('/tty')
+            else:
+                url = client._url('/no-tty')
+            resp = client._post(url, stream=True)
+            return client._read_from_socket(
+                resp, stream=stream, tty=tty, demux=demux)
+
+    def test_read_from_socket_tty(self):
+        res = self.request(stream=True, tty=True, demux=False)
+        assert next(res) == self.stdout_data + self.stderr_data
+        with self.assertRaises(StopIteration):
+            next(res)
+
+    def test_read_from_socket_tty_demux(self):
+        res = self.request(stream=True, tty=True, demux=True)
+        assert next(res) == (self.stdout_data + self.stderr_data, None)
+        with self.assertRaises(StopIteration):
+            next(res)
+
+    def test_read_from_socket_no_tty(self):
+        res = self.request(stream=True, tty=False, demux=False)
+        assert next(res) == self.stdout_data
+        assert next(res) == self.stderr_data
+        with self.assertRaises(StopIteration):
+            next(res)
+
+    def test_read_from_socket_no_tty_demux(self):
+        res = self.request(stream=True, tty=False, demux=True)
+        assert (self.stdout_data, None) == next(res)
+        assert (None, self.stderr_data) == next(res)
+        with self.assertRaises(StopIteration):
+            next(res)
+
+    def test_read_from_socket_no_stream_tty(self):
+        res = self.request(stream=False, tty=True, demux=False)
+        assert res == self.stdout_data + self.stderr_data
+
+    def test_read_from_socket_no_stream_tty_demux(self):
+        res = self.request(stream=False, tty=True, demux=True)
+        assert res == (self.stdout_data + self.stderr_data, None)
+
+    def test_read_from_socket_no_stream_no_tty(self):
+        res = self.request(stream=False, tty=False, demux=False)
+        assert res == self.stdout_data + self.stderr_data
+
+    def test_read_from_socket_no_stream_no_tty_demux(self):
+        res = self.request(stream=False, tty=False, demux=True)
+        assert res == (self.stdout_data, self.stderr_data)
 
 
 class UserAgentTest(unittest.TestCase):
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py
index 947d680018..dc4d6f59ad 100644
--- a/tests/unit/auth_test.py
+++ b/tests/unit/auth_test.py
@@ -10,6 +10,7 @@
 import unittest
 
 from docker import auth, errors
+import dockerpycreds
 import pytest
 
 try:
@@ -106,13 +107,13 @@ class ResolveAuthTest(unittest.TestCase):
     private_config = {'auth': encode_auth({'username': 'privateuser'})}
     legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
 
-    auth_config = {
+    auth_config = auth.AuthConfig({
         'auths': auth.parse_auth({
             'https://index.docker.io/v1/': index_config,
             'my.registry.net': private_config,
             'http://legacy.registry.url/v1/': legacy_config,
         })
-    }
+    })
 
     def test_resolve_authconfig_hostname_only(self):
         assert auth.resolve_authconfig(
@@ -211,70 +212,21 @@ def test_resolve_registry_and_auth_unauthenticated_registry(self):
         ) is None
 
     def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
-        auth_config = {
+        auth_config = auth.AuthConfig({
             'auths': auth.parse_auth({
                 'https://index.docker.io/v1/': self.index_config,
             }),
             'credsStore': 'blackbox'
-        }
-        with mock.patch('docker.auth._resolve_authconfig_credstore') as m:
+        })
+        with mock.patch(
+            'docker.auth.AuthConfig._resolve_authconfig_credstore'
+        ) as m:
             m.return_value = None
             assert 'indexuser' == auth.resolve_authconfig(
                 auth_config, None
             )['username']
 
 
-class CredStoreTest(unittest.TestCase):
-    def test_get_credential_store(self):
-        auth_config = {
-            'credHelpers': {
-                'registry1.io': 'truesecret',
-                'registry2.io': 'powerlock'
-            },
-            'credsStore': 'blackbox',
-        }
-
-        assert auth.get_credential_store(
-            auth_config, 'registry1.io'
-        ) == 'truesecret'
-        assert auth.get_credential_store(
-            auth_config, 'registry2.io'
-        ) == 'powerlock'
-        assert auth.get_credential_store(
-            auth_config, 'registry3.io'
-        ) == 'blackbox'
-
-    def test_get_credential_store_no_default(self):
-        auth_config = {
-            'credHelpers': {
-                'registry1.io': 'truesecret',
-                'registry2.io': 'powerlock'
-            },
-        }
-        assert auth.get_credential_store(
-            auth_config, 'registry2.io'
-        ) == 'powerlock'
-        assert auth.get_credential_store(
-            auth_config, 'registry3.io'
-        ) is None
-
-    def test_get_credential_store_default_index(self):
-        auth_config = {
-            'credHelpers': {
-                'https://index.docker.io/v1/': 'powerlock'
-            },
-            'credsStore': 'truesecret'
-        }
-
-        assert auth.get_credential_store(auth_config, None) == 'powerlock'
-        assert auth.get_credential_store(
-            auth_config, 'docker.io'
-        ) == 'powerlock'
-        assert auth.get_credential_store(
-            auth_config, 'images.io'
-        ) == 'truesecret'
-
-
 class LoadConfigTest(unittest.TestCase):
     def test_load_config_no_file(self):
         folder = tempfile.mkdtemp()
@@ -293,8 +245,8 @@ def test_load_legacy_config(self):
 
         cfg = auth.load_config(cfg_path)
         assert auth.resolve_authconfig(cfg) is not None
-        assert cfg['auths'][auth.INDEX_NAME] is not None
-        cfg = cfg['auths'][auth.INDEX_NAME]
+        assert cfg.auths[auth.INDEX_NAME] is not None
+        cfg = cfg.auths[auth.INDEX_NAME]
         assert cfg['username'] == 'sakuya'
         assert cfg['password'] == 'izayoi'
         assert cfg['email'] == 'sakuya@scarlet.net'
@@ -312,8 +264,8 @@ def test_load_json_config(self):
         )
         cfg = auth.load_config(cfg_path)
         assert auth.resolve_authconfig(cfg) is not None
-        assert cfg['auths'][auth.INDEX_URL] is not None
-        cfg = cfg['auths'][auth.INDEX_URL]
+        assert cfg.auths[auth.INDEX_URL] is not None
+        cfg = cfg.auths[auth.INDEX_URL]
         assert cfg['username'] == 'sakuya'
         assert cfg['password'] == 'izayoi'
         assert cfg['email'] == email
@@ -335,8 +287,8 @@ def test_load_modern_json_config(self):
         }, f)
         cfg = auth.load_config(cfg_path)
         assert auth.resolve_authconfig(cfg) is not None
-        assert cfg['auths'][auth.INDEX_URL] is not None
-        cfg = cfg['auths'][auth.INDEX_URL]
+        assert cfg.auths[auth.INDEX_URL] is not None
+        cfg = cfg.auths[auth.INDEX_URL]
         assert cfg['username'] == 'sakuya'
         assert cfg['password'] == 'izayoi'
         assert cfg['email'] == email
@@ -360,7 +312,7 @@ def test_load_config_with_random_name(self):
         with open(dockercfg_path, 'w') as f:
             json.dump(config, f)
 
-        cfg = auth.load_config(dockercfg_path)['auths']
+        cfg = auth.load_config(dockercfg_path).auths
         assert registry in cfg
         assert cfg[registry] is not None
         cfg = cfg[registry]
@@ -387,7 +339,7 @@ def test_load_config_custom_config_env(self):
             json.dump(config, f)
 
         with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
-            cfg = auth.load_config(None)['auths']
+            cfg = auth.load_config(None).auths
         assert registry in cfg
         assert cfg[registry] is not None
         cfg = cfg[registry]
@@ -417,8 +369,8 @@ def test_load_config_custom_config_env_with_auths(self):
 
         with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
             cfg = auth.load_config(None)
-            assert registry in cfg['auths']
-            cfg = cfg['auths'][registry]
+            assert registry in cfg.auths
+            cfg = cfg.auths[registry]
             assert cfg['username'] == 'sakuya'
             assert cfg['password'] == 'izayoi'
             assert cfg['email'] == 'sakuya@scarlet.net'
@@ -446,8 +398,8 @@ def test_load_config_custom_config_env_utf8(self):
 
         with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
             cfg = auth.load_config(None)
-            assert registry in cfg['auths']
-            cfg = cfg['auths'][registry]
+            assert registry in cfg.auths
+            cfg = cfg.auths[registry]
             assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
             assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
             assert cfg['email'] == 'sakuya@scarlet.net'
@@ -464,7 +416,7 @@ def test_load_config_unknown_keys(self):
             json.dump(config, f)
 
         cfg = auth.load_config(dockercfg_path)
-        assert cfg == {'auths': {}}
+        assert dict(cfg) == {'auths': {}}
 
     def test_load_config_invalid_auth_dict(self):
         folder = tempfile.mkdtemp()
@@ -479,7 +431,7 @@ def test_load_config_invalid_auth_dict(self):
             json.dump(config, f)
 
         cfg = auth.load_config(dockercfg_path)
-        assert cfg == {'auths': {'scarlet.net': {}}}
+        assert dict(cfg) == {'auths': {'scarlet.net': {}}}
 
     def test_load_config_identity_token(self):
         folder = tempfile.mkdtemp()
@@ -500,7 +452,236 @@ def test_load_config_identity_token(self):
             json.dump(config, f)
 
         cfg = auth.load_config(dockercfg_path)
-        assert registry in cfg['auths']
-        cfg = cfg['auths'][registry]
+        assert registry in cfg.auths
+        cfg = cfg.auths[registry]
         assert 'IdentityToken' in cfg
         assert cfg['IdentityToken'] == token
+
+
+class CredstoreTest(unittest.TestCase):
+    def setUp(self):
+        self.authconfig = auth.AuthConfig({'credsStore': 'default'})
+        self.default_store = InMemoryStore('default')
+        self.authconfig._stores['default'] = self.default_store
+        self.default_store.store(
+            'https://gensokyo.jp/v2', 'sakuya', 'izayoi',
+        )
+        self.default_store.store(
+            'https://default.com/v2', 'user', 'hunter2',
+        )
+
+    def test_get_credential_store(self):
+        auth_config = auth.AuthConfig({
+            'credHelpers': {
+                'registry1.io': 'truesecret',
+                'registry2.io': 'powerlock'
+            },
+            'credsStore': 'blackbox',
+        })
+
+        assert auth_config.get_credential_store('registry1.io') == 'truesecret'
+        assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+        assert auth_config.get_credential_store('registry3.io') == 'blackbox'
+
+    def test_get_credential_store_no_default(self):
+        auth_config = auth.AuthConfig({
+            'credHelpers': {
+                'registry1.io': 'truesecret',
+                'registry2.io': 'powerlock'
+            },
+        })
+        assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+        assert auth_config.get_credential_store('registry3.io') is None
+
+    def test_get_credential_store_default_index(self):
+        auth_config = auth.AuthConfig({
+            'credHelpers': {
+                'https://index.docker.io/v1/': 'powerlock'
+            },
+            'credsStore': 'truesecret'
+        })
+
+        assert auth_config.get_credential_store(None) == 'powerlock'
+        assert auth_config.get_credential_store('docker.io') == 'powerlock'
+        assert auth_config.get_credential_store('images.io') == 'truesecret'
+
+    def test_get_credential_store_with_plain_dict(self):
+        auth_config = {
+            'credHelpers': {
+                'registry1.io': 'truesecret',
+                'registry2.io': 'powerlock'
+            },
+            'credsStore': 'blackbox',
+        }
+
+        assert auth.get_credential_store(
+            auth_config, 'registry1.io'
+        ) == 'truesecret'
+        assert auth.get_credential_store(
+            auth_config, 'registry2.io'
+        ) == 'powerlock'
+        assert auth.get_credential_store(
+            auth_config, 'registry3.io'
+        ) == 'blackbox'
+
+    def test_get_all_credentials_credstore_only(self):
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'user',
+                'Password': 'hunter2',
+                'ServerAddress': 'https://default.com/v2',
+            },
+        }
+
+    def test_get_all_credentials_with_empty_credhelper(self):
+        self.authconfig['credHelpers'] = {
+            'registry1.io': 'truesecret',
+        }
+        self.authconfig._stores['truesecret'] = InMemoryStore()
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'user',
+                'Password': 'hunter2',
+                'ServerAddress': 'https://default.com/v2',
+            },
+            'registry1.io': None,
+        }
+
+    def test_get_all_credentials_with_credhelpers_only(self):
+        del self.authconfig['credsStore']
+        assert self.authconfig.get_all_credentials() == {}
+
+        self.authconfig['credHelpers'] = {
+            'https://gensokyo.jp/v2': 'default',
+            'https://default.com/v2': 'default',
+        }
+
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'user',
+                'Password': 'hunter2',
+                'ServerAddress': 'https://default.com/v2',
+            },
+        }
+
+    def test_get_all_credentials_with_auths_entries(self):
+        self.authconfig.add_auth('registry1.io', {
+            'ServerAddress': 'registry1.io',
+            'Username': 'reimu',
+            'Password': 'hakurei',
+        })
+
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'user',
+                'Password': 'hunter2',
+                'ServerAddress': 'https://default.com/v2',
+            },
+            'registry1.io': {
+                'ServerAddress': 'registry1.io',
+                'Username': 'reimu',
+                'Password': 'hakurei',
+            },
+        }
+
+    def test_get_all_credentials_helpers_override_default(self):
+        self.authconfig['credHelpers'] = {
+            'https://default.com/v2': 'truesecret',
+        }
+        truesecret = InMemoryStore('truesecret')
+        truesecret.store('https://default.com/v2', 'reimu', 'hakurei')
+        self.authconfig._stores['truesecret'] = truesecret
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'reimu',
+                'Password': 'hakurei',
+                'ServerAddress': 'https://default.com/v2',
+            },
+        }
+
+    def test_get_all_credentials_3_sources(self):
+        self.authconfig['credHelpers'] = {
+            'registry1.io': 'truesecret',
+        }
+        truesecret = InMemoryStore('truesecret')
+        truesecret.store('registry1.io', 'reimu', 'hakurei')
+        self.authconfig._stores['truesecret'] = truesecret
+        self.authconfig.add_auth('registry2.io', {
+            'ServerAddress': 'registry2.io',
+            'Username': 'reimu',
+            'Password': 'hakurei',
+        })
+
+        assert self.authconfig.get_all_credentials() == {
+            'https://gensokyo.jp/v2': {
+                'Username': 'sakuya',
+                'Password': 'izayoi',
+                'ServerAddress': 'https://gensokyo.jp/v2',
+            },
+            'https://default.com/v2': {
+                'Username': 'user',
+                'Password': 'hunter2',
+                'ServerAddress': 'https://default.com/v2',
+            },
+            'registry1.io': {
+                'ServerAddress': 'registry1.io',
+                'Username': 'reimu',
+                'Password': 'hakurei',
+            },
+            'registry2.io': {
+                'ServerAddress': 'registry2.io',
+                'Username': 'reimu',
+                'Password': 'hakurei',
+            }
+        }
+
+
+class InMemoryStore(dockerpycreds.Store):
+    def __init__(self, *args, **kwargs):
+        self.__store = {}
+
+    def get(self, server):
+        try:
+            return self.__store[server]
+        except KeyError:
+            raise dockerpycreds.errors.CredentialsNotFound()
+
+    def store(self, server, username, secret):
+        self.__store[server] = {
+            'ServerURL': server,
+            'Username': username,
+            'Secret': secret,
+        }
+
+    def list(self):
+        return dict(
+            [(k, v['Username']) for k, v in self.__store.items()]
+        )
+
+    def erase(self, server):
+        del self.__store[server]
diff --git a/tests/unit/dockertypes_test.py b/tests/unit/dockertypes_test.py
index cdacf8cd5b..0689d07b32 100644
--- a/tests/unit/dockertypes_test.py
+++ b/tests/unit/dockertypes_test.py
@@ -14,7 +14,7 @@
 
 try:
     from unittest import mock
-except:
+except:  # noqa: E722
     import mock
 
diff --git a/tests/unit/errors_test.py b/tests/unit/errors_test.py
index e27a9b1975..2134f86f04 100644
--- a/tests/unit/errors_test.py
+++ b/tests/unit/errors_test.py
@@ -79,6 +79,27 @@ def test_is_client_error_400(self):
         err = APIError('', response=resp)
         assert err.is_client_error() is True
 
+    def test_is_error_300(self):
+        """Report no error on 300 response."""
+        resp = requests.Response()
+        resp.status_code = 300
+        err = APIError('', response=resp)
+        assert err.is_error() is False
+
+    def test_is_error_400(self):
+        """Report error on 400 response."""
+        resp = requests.Response()
+        resp.status_code = 400
+        err = APIError('', response=resp)
+        assert err.is_error() is True
+
+    def test_is_error_500(self):
+        """Report error on 500 response."""
+        resp = requests.Response()
+        resp.status_code = 500
+        err = APIError('', response=resp)
+        assert err.is_error() is True
+
     def test_create_error_from_exception(self):
         resp = requests.Response()
         resp.status_code = 500
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index 39e409e4bf..f44e365851 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -416,10 +416,11 @@ def test_exec_run(self):
         client.api.exec_create.assert_called_with(
             FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
             stdin=False, tty=False, privileged=True, user='', environment=None,
-            workdir=None
+            workdir=None,
         )
         client.api.exec_start.assert_called_with(
-            FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
+            FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False,
+            demux=False,
         )
 
     def test_exec_run_failure(self):
@@ -429,10 +430,11 @@ def test_exec_run_failure(self):
         client.api.exec_create.assert_called_with(
             FAKE_CONTAINER_ID, "docker ps", stdout=True, stderr=True,
             stdin=False, tty=False, privileged=True, user='', environment=None,
-            workdir=None
+            workdir=None,
         )
         client.api.exec_start.assert_called_with(
-            FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False
+            FAKE_EXEC_ID, detach=False, tty=False, stream=False, socket=False,
+            demux=False,
         )
 
     def test_export(self):
diff --git a/tests/unit/utils_proxy_test.py b/tests/unit/utils_proxy_test.py
new file mode 100644
index 0000000000..ff0e14ba74
--- /dev/null
+++ b/tests/unit/utils_proxy_test.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+import unittest
+import six
+
+from docker.utils.proxy import ProxyConfig
+
+HTTP = 'http://test:80'
+HTTPS = 'https://test:443'
+FTP = 'ftp://user:password@host:23'
+NO_PROXY = 'localhost,.localdomain'
+CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY)
+ENV = {
+    'http_proxy': HTTP,
+    'HTTP_PROXY': HTTP,
+    'https_proxy': HTTPS,
+    'HTTPS_PROXY': HTTPS,
+    'ftp_proxy': FTP,
+    'FTP_PROXY': FTP,
+    'no_proxy': NO_PROXY,
+    'NO_PROXY': NO_PROXY,
+}
+
+
+class ProxyConfigTest(unittest.TestCase):
+
+    def test_from_dict(self):
+        config = ProxyConfig.from_dict({
+            'httpProxy': HTTP,
+            'httpsProxy': HTTPS,
+            'ftpProxy': FTP,
+            'noProxy': NO_PROXY
+        })
+        self.assertEqual(CONFIG.http, config.http)
+        self.assertEqual(CONFIG.https, config.https)
+        self.assertEqual(CONFIG.ftp, config.ftp)
+        self.assertEqual(CONFIG.no_proxy, config.no_proxy)
+
+    def test_new(self):
+        config = ProxyConfig()
+        self.assertIsNone(config.http)
+        self.assertIsNone(config.https)
+        self.assertIsNone(config.ftp)
+        self.assertIsNone(config.no_proxy)
+
+        config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d')
+        self.assertEqual(config.http, 'a')
+        self.assertEqual(config.https, 'b')
+        self.assertEqual(config.ftp, 'c')
+        self.assertEqual(config.no_proxy, 'd')
+
+    def test_truthiness(self):
+        assert not ProxyConfig()
+        assert ProxyConfig(http='non-zero')
+        assert ProxyConfig(https='non-zero')
+        assert ProxyConfig(ftp='non-zero')
+        assert ProxyConfig(no_proxy='non-zero')
+
+    def test_environment(self):
+        self.assertDictEqual(CONFIG.get_environment(), ENV)
+        empty = ProxyConfig()
+        self.assertDictEqual(empty.get_environment(), {})
+
+    def test_inject_proxy_environment(self):
+        # Proxy config is non null, env is None.
+        self.assertSetEqual(
+            set(CONFIG.inject_proxy_environment(None)),
+            set(['{}={}'.format(k, v) for k, v in six.iteritems(ENV)]))
+
+        # Proxy config is null, env is None.
+        self.assertIsNone(ProxyConfig().inject_proxy_environment(None))
+
+        env = ['FOO=BAR', 'BAR=BAZ']
+
+        # Proxy config is non null, env is non null
+        actual = CONFIG.inject_proxy_environment(env)
+        expected = ['{}={}'.format(k, v) for k, v in six.iteritems(ENV)] + env
+        # It's important that the first 8 variables are the ones from the proxy
+        # config, and the last 2 are the ones from the input environment
+        self.assertSetEqual(set(actual[:8]), set(expected[:8]))
+        self.assertSetEqual(set(actual[-2:]), set(expected[-2:]))
+
+        # Proxy config is null, env is non null
+        self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env)
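
For reviewers who want to see the proxy plumbing end to end: the integration tests above assign `_proxy_configs` directly, but in normal use the values come from the `proxies` section of the client configuration file (`~/.docker/config.json` by default), loaded in `APIClient.__init__` per the `docker/api/client.py` hunk. A minimal sketch of the resulting behavior, assuming a checkout with this patch applied; the proxy address and the build context path are hypothetical:

    import docker
    from docker.utils.proxy import ProxyConfig

    client = docker.APIClient()
    # Normally derived from the client config file; set directly here only
    # for illustration, exactly as the integration tests above do.
    client._proxy_configs = ProxyConfig(
        http='proxy.example:3128', no_proxy='localhost'
    )

    # With use_config_proxy=True, HTTP_PROXY/http_proxy (and friends) are
    # injected as build args, but never override args the caller passed in.
    for chunk in client.build(path='.', use_config_proxy=True, decode=True):
        print(chunk)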
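The eight tty/stream/demux combinations exercised by `test_exec_command_demux` reduce to a simple rule: `demux=True` yields `(stdout, stderr)` tuples instead of a single byte stream, and with `tty=True` everything arrives on stdout because the pseudo-terminal merges the two streams. A condensed sketch of the two most common cases; the container id `'cid'` is a placeholder for any running container:

    import docker

    client = docker.APIClient()
    cmd = 'sh -c "echo out; echo err >&2"'

    # demux=False (the default): one interleaved byte string.
    exc = client.exec_create('cid', cmd)
    print(client.exec_start(exc))               # b'out\nerr\n'

    # demux=True: stdout and stderr come back separately, via the new
    # consume_socket_output/demux_adaptor path in _read_from_socket.
    exc = client.exec_create('cid', cmd)
    print(client.exec_start(exc, demux=True))   # (b'out\n', b'err\n')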
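Likewise, the `CredstoreTest` cases pin down the merge behavior of `AuthConfig.get_all_credentials()`: it combines plain `auths` entries, the default `credsStore`, and per-registry `credHelpers`, with a helper overriding the default store for its registry. A condensed sketch reusing the `InMemoryStore` test double defined in `tests/unit/auth_test.py`; the registry name is made up:

    from docker import auth

    cfg = auth.AuthConfig({'credsStore': 'default'})
    store = InMemoryStore('default')  # in-memory stand-in for a real helper
    store.store('registry.example', 'user', 'secret')
    cfg._stores['default'] = store

    # Every registry known to the store resolves to a credential dict.
    creds = cfg.get_all_credentials()
    assert creds['registry.example']['Username'] == 'user'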