diff --git a/Jenkinsfile b/Jenkinsfile
index b73a78eb95..566d5494c1 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -4,7 +4,10 @@ def imageNameBase = "dockerbuildbot/docker-py"
def imageNamePy2
def imageNamePy3
def images = [:]
-def dockerVersions = ["1.12.0", "1.13.0-rc3"]
+
+// Note: Swarm in dind seems notoriously flimsy with 1.12.1+, which is why we're
+// sticking with 1.12.0 for the 1.12 series
+def dockerVersions = ["1.12.0", "1.13.1"]
def buildImage = { name, buildargs, pyTag ->
img = docker.image(name)
@@ -31,10 +34,16 @@ def buildImages = { ->
}
}
+def getAPIVersion = { engineVersion ->
+ def versionMap = ['1.12': '1.24', '1.13': '1.25']
+ return versionMap[engineVersion.substring(0, 4)]
+}
+
def runTests = { Map settings ->
def dockerVersion = settings.get("dockerVersion", null)
def pythonVersion = settings.get("pythonVersion", null)
def testImage = settings.get("testImage", null)
+ def apiVersion = getAPIVersion(dockerVersion)
if (!testImage) {
throw new Exception("Need test image object, e.g.: `runTests(testImage: img)`")
@@ -50,8 +59,8 @@ def runTests = { Map settings ->
wrappedNode(label: "ubuntu && !zfs && amd64", cleanWorkspace: true) {
stage("test python=${pythonVersion} / docker=${dockerVersion}") {
checkout(scm)
- def dindContainerName = "dpy-dind-\$BUILD_NUMBER-\$EXECUTOR_NUMBER"
- def testContainerName = "dpy-tests-\$BUILD_NUMBER-\$EXECUTOR_NUMBER"
+ def dindContainerName = "dpy-dind-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
+ def testContainerName = "dpy-tests-\$BUILD_NUMBER-\$EXECUTOR_NUMBER-${pythonVersion}-${dockerVersion}"
try {
sh """docker run -d --name ${dindContainerName} -v /tmp --privileged \\
dockerswarm/dind:${dockerVersion} docker daemon -H tcp://0.0.0.0:2375
@@ -59,6 +68,7 @@ def runTests = { Map settings ->
sh """docker run \\
--name ${testContainerName} --volumes-from ${dindContainerName} \\
-e 'DOCKER_HOST=tcp://docker:2375' \\
+ -e 'DOCKER_TEST_API_VERSION=${apiVersion}' \\
--link=${dindContainerName}:docker \\
${testImage} \\
py.test -v -rxs tests/integration
diff --git a/MANIFEST.in b/MANIFEST.in
index ee6cdbbd6f..41b3fa9f8b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,3 +5,4 @@ include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
+recursive-include tests/integration/testdata *
diff --git a/Makefile b/Makefile
index 8727ada4dc..148c50a4c0 100644
--- a/Makefile
+++ b/Makefile
@@ -44,12 +44,12 @@ integration-test-py3: build-py3
.PHONY: integration-dind
integration-dind: build build-py3
docker rm -vf dpy-dind || :
- docker run -d --name dpy-dind --privileged dockerswarm/dind:1.13.0-rc3 docker daemon\
+ docker run -d --name dpy-dind --privileged dockerswarm/dind:1.13.0 docker daemon\
-H tcp://0.0.0.0:2375
- docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-sdk-python\
- py.test tests/integration
- docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --link=dpy-dind:docker docker-sdk-python3\
- py.test tests/integration
+ docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TEST_API_VERSION=1.25"\
+ --link=dpy-dind:docker docker-sdk-python py.test tests/integration
+ docker run --rm --env="DOCKER_HOST=tcp://docker:2375" --env="DOCKER_TEST_API_VERSION=1.25"\
+ --link=dpy-dind:docker docker-sdk-python3 py.test tests/integration
docker rm -vf dpy-dind
.PHONY: integration-dind-ssl
@@ -57,14 +57,14 @@ integration-dind-ssl: build-dind-certs build build-py3
docker run -d --name dpy-dind-certs dpy-dind-certs
docker run -d --env="DOCKER_HOST=tcp://localhost:2375" --env="DOCKER_TLS_VERIFY=1"\
--env="DOCKER_CERT_PATH=/certs" --volumes-from dpy-dind-certs --name dpy-dind-ssl\
- -v /tmp --privileged dockerswarm/dind:1.13.0-rc3 docker daemon --tlsverify\
+ -v /tmp --privileged dockerswarm/dind:1.13.0 docker daemon --tlsverify\
--tlscacert=/certs/ca.pem --tlscert=/certs/server-cert.pem\
--tlskey=/certs/server-key.pem -H tcp://0.0.0.0:2375
docker run --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
- --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
+ --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=1.25"\
--link=dpy-dind-ssl:docker docker-sdk-python py.test tests/integration
docker run --rm --volumes-from dpy-dind-ssl --env="DOCKER_HOST=tcp://docker:2375"\
- --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs"\
+ --env="DOCKER_TLS_VERIFY=1" --env="DOCKER_CERT_PATH=/certs" --env="DOCKER_TEST_API_VERSION=1.25"\
--link=dpy-dind-ssl:docker docker-sdk-python3 py.test tests/integration
docker rm -vf dpy-dind-ssl dpy-dind-certs
diff --git a/README.md b/README.md
index d80d9307f0..38963b325c 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ A Python library for the Docker Engine API. It lets you do anything the `docker`
## Installation
-The latest stable version [is available on PyPi](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
diff --git a/docker/api/build.py b/docker/api/build.py
index eb01bce389..5c34c47b38 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -1,11 +1,11 @@
+import json
import logging
import os
import re
-import json
+from .. import auth
from .. import constants
from .. import errors
-from .. import auth
from .. import utils
@@ -18,7 +18,7 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
- labels=None):
+ labels=None, cache_from=None):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
@@ -92,6 +92,8 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None,
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB.
labels (dict): A dictionary of labels to set on the image.
+ cache_from (list): A list of images used for build cache
+ resolution.
Returns:
A generator for the build output.
@@ -188,6 +190,14 @@ def build(self, path=None, tag=None, quiet=False, fileobj=None,
'labels was only introduced in API version 1.23'
)
+ if cache_from:
+ if utils.version_gte(self._version, '1.25'):
+ params.update({'cachefrom': json.dumps(cache_from)})
+ else:
+ raise errors.InvalidVersion(
+ 'cache_from was only introduced in API version 1.25'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
diff --git a/docker/api/client.py b/docker/api/client.py
index 22c32b44d9..99d7879cb8 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -14,6 +14,8 @@
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
@@ -46,11 +48,13 @@ class APIClient(
ExecApiMixin,
ImageApiMixin,
NetworkApiMixin,
+ PluginApiMixin,
+ SecretApiMixin,
ServiceApiMixin,
SwarmApiMixin,
VolumeApiMixin):
"""
- A low-level client for the Docker Remote API.
+ A low-level client for the Docker Engine API.
Example:
@@ -225,10 +229,12 @@ def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
- if data is not None:
+ if data is not None and isinstance(data, dict):
for k, v in six.iteritems(data):
if v is not None:
data2[k] = v
+ elif data is not None:
+ data2 = data
if 'headers' not in kwargs:
kwargs['headers'] = {}
@@ -302,11 +308,13 @@ def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
+ buf_length = len(buf)
walker = 0
while True:
- if len(buf[walker:]) < 8:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
- _, length = struct.unpack_from('>BxxxL', buf[walker:])
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ _, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
diff --git a/docker/api/container.py b/docker/api/container.py
index efcae9b0c6..453e378516 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -108,7 +108,7 @@ def commit(self, container, repository=None, tag=None, message=None,
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
- `Remote API documentation
+ `Engine API documentation
`_
for full details.
@@ -238,7 +238,7 @@ def create_container(self, image, command=None, hostname=None, user=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None,
- healthcheck=None):
+ healthcheck=None, stop_timeout=None):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
@@ -313,9 +313,10 @@ def create_container(self, image, command=None, hostname=None, user=None,
**Using volumes**
- Volume declaration is done in two parts. Provide a list of mountpoints
- to the with the ``volumes`` parameter, and declare mappings in the
- ``host_config`` section.
+ Volume declaration is done in two parts. Provide a list of
+ paths to use as mountpoints inside the container with the
+ ``volumes`` parameter, and declare mappings from paths on the host
+ in the ``host_config`` section.
.. code-block:: python
@@ -392,7 +393,8 @@ def create_container(self, image, command=None, hostname=None, user=None,
version 1.10. Use ``host_config`` instead.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
- volumes (str or list):
+ volumes (str or list): List of paths inside the container to use
+ as volumes.
volumes_from (:py:class:`list`): List of container names or Ids to
get volumes from.
network_disabled (bool): Disable networking
@@ -411,6 +413,8 @@ def create_container(self, image, command=None, hostname=None, user=None,
volume_driver (str): The name of a volume driver/plugin.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
+ stop_timeout (int): Timeout to stop the container, in seconds.
+ Default: 10
networking_config (dict): A networking configuration generated
by :py:meth:`create_networking_config`.
@@ -437,6 +441,7 @@ def create_container(self, image, command=None, hostname=None, user=None,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
volume_driver, stop_signal, networking_config, healthcheck,
+ stop_timeout
)
return self.create_container_from_config(config, name)
@@ -457,6 +462,8 @@ def create_host_config(self, *args, **kwargs):
:py:meth:`create_container`.
Args:
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
binds (dict): Volumes to bind. See :py:meth:`create_container`
for more information.
blkio_weight_device: Block IO weight (relative device weight) in
@@ -542,6 +549,8 @@ def create_host_config(self, *args, **kwargs):
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
@@ -906,9 +915,6 @@ def put_archive(self, container, path, data):
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
-
- Raises:
- :py:class:`~docker.errors.APIError` If an error occurs.
"""
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
@@ -916,6 +922,28 @@ def put_archive(self, container, path, data):
self._raise_for_status(res)
return res.status_code == 200
+ @utils.minimum_version('1.25')
+ def prune_containers(self, filters=None):
+ """
+ Delete stopped containers
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted container IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/containers/prune')
+ return self._result(self._post(url, params=params), True)
+
@utils.check_resource
def remove_container(self, container, v=False, link=False, force=False):
"""
diff --git a/docker/api/image.py b/docker/api/image.py
index c1ebc69ca6..09eb086d78 100644
--- a/docker/api/image.py
+++ b/docker/api/image.py
@@ -274,6 +274,31 @@ def load_image(self, data):
res = self._post(self._url("/images/load"), data=data)
self._raise_for_status(res)
+ @utils.minimum_version('1.25')
+ def prune_images(self, filters=None):
+ """
+ Delete unused images
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+ Available filters:
+ - dangling (bool): When set to true (or 1), prune only
+ unused and untagged images.
+
+ Returns:
+ (dict): A dict containing a list of deleted image IDs and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/images/prune")
+ params = {}
+ if filters is not None:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._post(url, params=params), True)
+
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
"""
diff --git a/docker/api/network.py b/docker/api/network.py
index 9f6d98fea3..9652228de1 100644
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -133,6 +133,28 @@ def create_network(self, name, driver=None, options=None, ipam=None,
res = self._post_json(url, data=data)
return self._result(res, json=True)
+ @minimum_version('1.25')
+ def prune_networks(self, filters=None):
+ """
+ Delete unused networks
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted network names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/networks/prune')
+ return self._result(self._post(url, params=params), True)
+
@minimum_version('1.21')
def remove_network(self, net_id):
"""
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
new file mode 100644
index 0000000000..772d263387
--- /dev/null
+++ b/docker/api/plugin.py
@@ -0,0 +1,214 @@
+import six
+
+from .. import auth, utils
+
+
+class PluginApiMixin(object):
+ @utils.minimum_version('1.25')
+ @utils.check_resource
+ def configure_plugin(self, name, options):
+ """
+ Configure a plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ options (dict): A key-value mapping of options
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/set', name)
+ data = options
+ if isinstance(data, dict):
+ data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
+ res = self._post_json(url, data=data)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def create_plugin(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/create')
+
+ with utils.create_archive(root=plugin_data_dir, gzip=gzip) as archv:
+ res = self._post(url, params={'name': name}, data=archv)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def disable_plugin(self, name):
+ """
+ Disable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/disable', name)
+ res = self._post(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def enable_plugin(self, name, timeout=0):
+ """
+ Enable an installed plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ timeout (int): Operation timeout (in seconds). Default: 0
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}/enable', name)
+ params = {'timeout': timeout}
+ res = self._post(url, params=params)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def inspect_plugin(self, name):
+ """
+ Retrieve plugin metadata.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+
+ Returns:
+ A dict containing plugin info
+ """
+ url = self._url('/plugins/{0}/json', name)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def pull_plugin(self, remote, privileges, name=None):
+ """
+ Pull and install a plugin. After the plugin is installed, it can be
+ enabled using :py:meth:`~enable_plugin`.
+
+ Args:
+ remote (string): Remote reference for the plugin to install.
+ The ``:latest`` tag is optional, and is the default if
+ omitted.
+ privileges (list): A list of privileges the user consents to
+ grant to the plugin. Can be retrieved using
+ :py:meth:`~plugin_privileges`.
+ name (string): Local name for the pulled plugin. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ An iterable object streaming the decoded API logs
+ """
+ url = self._url('/plugins/pull')
+ params = {
+ 'remote': remote,
+ }
+ if name:
+ params['name'] = name
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(remote)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self._post_json(
+ url, params=params, headers=headers, data=privileges,
+ stream=True
+ )
+ self._raise_for_status(response)
+ return self._stream_helper(response, decode=True)
+
+ @utils.minimum_version('1.25')
+ def plugins(self):
+ """
+ Retrieve a list of installed plugins.
+
+ Returns:
+ A list of dicts, one per plugin
+ """
+ url = self._url('/plugins')
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ def plugin_privileges(self, name):
+ """
+ Retrieve list of privileges to be granted to a plugin.
+
+ Args:
+ name (string): Name of the remote plugin to examine. The
+ ``:latest`` tag is optional, and is the default if omitted.
+
+ Returns:
+ A list of dictionaries representing the plugin's
+ permissions
+
+ """
+ params = {
+ 'remote': name,
+ }
+
+ url = self._url('/plugins/privileges')
+ return self._result(self._get(url, params=params), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource
+ def push_plugin(self, name):
+ """
+ Push a plugin to the registry.
+
+ Args:
+ name (string): Name of the plugin to upload. The ``:latest``
+ tag is optional, and is the default if omitted.
+
+ Returns:
+ ``True`` if successful
+ """
+        url = self._url('/plugins/{0}/push', name)
+
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ res = self._post(url, headers=headers)
+ self._raise_for_status(res)
+ return self._stream_helper(res, decode=True)
+
+ @utils.minimum_version('1.25')
+ def remove_plugin(self, name, force=False):
+ """
+ Remove an installed plugin.
+
+ Args:
+ name (string): Name of the plugin to remove. The ``:latest``
+ tag is optional, and is the default if omitted.
+ force (bool): Disable the plugin before removing. This may
+ result in issues if the plugin is in use by a container.
+
+ Returns:
+ ``True`` if successful
+ """
+ url = self._url('/plugins/{0}', name)
+ res = self._delete(url, params={'force': force})
+ self._raise_for_status(res)
+ return True
diff --git a/docker/api/secret.py b/docker/api/secret.py
new file mode 100644
index 0000000000..03534a6236
--- /dev/null
+++ b/docker/api/secret.py
@@ -0,0 +1,91 @@
+import base64
+
+import six
+
+from .. import utils
+
+
+class SecretApiMixin(object):
+ @utils.minimum_version('1.25')
+ def create_secret(self, name, data, labels=None):
+ """
+ Create a secret
+
+ Args:
+ name (string): Name of the secret
+ data (bytes): Secret data to be stored
+ labels (dict): A mapping of labels to assign to the secret
+
+ Returns (dict): ID of the newly created secret
+ """
+ if not isinstance(data, bytes):
+ data = data.encode('utf-8')
+
+ data = base64.b64encode(data)
+ if six.PY3:
+ data = data.decode('ascii')
+ body = {
+ 'Data': data,
+ 'Name': name,
+ 'Labels': labels
+ }
+
+ url = self._url('/secrets/create')
+ return self._result(
+ self._post_json(url, data=body), True
+ )
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource
+ def inspect_secret(self, id):
+ """
+ Retrieve secret metadata
+
+ Args:
+            id (string): Full ID of the secret to inspect
+
+ Returns (dict): A dictionary of metadata
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ return self._result(self._get(url), True)
+
+ @utils.minimum_version('1.25')
+ @utils.check_resource
+ def remove_secret(self, id):
+ """
+ Remove a secret
+
+ Args:
+ id (string): Full ID of the secret to remove
+
+ Returns (boolean): True if successful
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ if no secret with that ID exists
+ """
+ url = self._url('/secrets/{0}', id)
+ res = self._delete(url)
+ self._raise_for_status(res)
+ return True
+
+ @utils.minimum_version('1.25')
+ def secrets(self, filters=None):
+ """
+ List secrets
+
+ Args:
+ filters (dict): A map of filters to process on the secrets
+ list. Available filters: ``names``
+
+ Returns (list): A list of secrets
+ """
+ url = self._url('/secrets')
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ return self._result(self._get(url, params=params), True)
diff --git a/docker/api/service.py b/docker/api/service.py
index d2621e685c..0b2abdc9af 100644
--- a/docker/api/service.py
+++ b/docker/api/service.py
@@ -62,10 +62,24 @@ def create_service(
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
- 'UpdateConfig': update_config,
'Networks': utils.convert_service_networks(networks),
'EndpointSpec': endpoint_spec
}
+
+ if update_config is not None:
+ if utils.version_lt(self._version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.max_failure_ratio is not supported in'
+ ' API version < 1.25'
+ )
+ if 'Monitor' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.monitor is not supported in'
+ ' API version < 1.25'
+ )
+ data['UpdateConfig'] = update_config
+
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@@ -230,6 +244,12 @@ def update_service(self, service, version, task_template=None, name=None,
mode = ServiceMode(mode)
data['Mode'] = mode
if task_template is not None:
+ if 'ForceUpdate' in task_template and utils.version_lt(
+ self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force_update is not supported in API version < 1.25'
+ )
+
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is not None:
registry, repo_name = auth.resolve_repository_name(image)
@@ -238,7 +258,19 @@ def update_service(self, service, version, task_template=None, name=None,
headers['X-Registry-Auth'] = auth_header
data['TaskTemplate'] = task_template
if update_config is not None:
+ if utils.version_lt(self._version, '1.25'):
+ if 'MaxFailureRatio' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.max_failure_ratio is not supported in'
+ ' API version < 1.25'
+ )
+ if 'Monitor' in update_config:
+ raise errors.InvalidVersion(
+ 'UpdateConfig.monitor is not supported in'
+ ' API version < 1.25'
+ )
data['UpdateConfig'] = update_config
+
if networks is not None:
data['Networks'] = utils.convert_service_networks(networks)
if endpoint_spec is not None:
diff --git a/docker/api/volume.py b/docker/api/volume.py
index 9c6d5f8351..ce911c8fcd 100644
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -38,7 +38,8 @@ def volumes(self, filters=None):
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.21')
- def create_volume(self, name, driver=None, driver_opts=None, labels=None):
+ def create_volume(self, name=None, driver=None, driver_opts=None,
+ labels=None):
"""
Create and register a named volume
@@ -64,7 +65,8 @@ def create_volume(self, name, driver=None, driver_opts=None, labels=None):
{u'Driver': u'local',
u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
- u'Name': u'foobar'}
+ u'Name': u'foobar',
+ u'Scope': u'local'}
"""
url = self._url('/volumes/create')
@@ -114,18 +116,50 @@ def inspect_volume(self, name):
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
+ @utils.minimum_version('1.25')
+ def prune_volumes(self, filters=None):
+ """
+ Delete unused volumes
+
+ Args:
+ filters (dict): Filters to process on the prune list.
+
+ Returns:
+ (dict): A dict containing a list of deleted volume names and
+ the amount of disk space reclaimed in bytes.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ params = {}
+ if filters:
+ params['filters'] = utils.convert_filters(filters)
+ url = self._url('/volumes/prune')
+ return self._result(self._post(url, params=params), True)
+
@utils.minimum_version('1.21')
- def remove_volume(self, name):
+ def remove_volume(self, name, force=False):
"""
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
Raises:
-
- ``docker.errors.APIError``: If volume failed to remove.
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
"""
- url = self._url('/volumes/{0}', name)
+ params = {}
+ if force:
+ if utils.version_lt(self._version, '1.25'):
+ raise errors.InvalidVersion(
+ 'force removal was introduced in API 1.25'
+ )
+ params = {'force': force}
+
+ url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
diff --git a/docker/client.py b/docker/client.py
index 171175d328..09bda67f82 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -3,6 +3,8 @@
from .models.images import ImageCollection
from .models.networks import NetworkCollection
from .models.nodes import NodeCollection
+from .models.plugins import PluginCollection
+from .models.secrets import SecretCollection
from .models.services import ServiceCollection
from .models.swarm import Swarm
from .models.volumes import VolumeCollection
@@ -109,6 +111,21 @@ def nodes(self):
"""
return NodeCollection(client=self)
+ @property
+ def plugins(self):
+ """
+ An object for managing plugins on the server. See the
+ :doc:`plugins documentation ` for full details.
+ """
+ return PluginCollection(client=self)
+
+ def secrets(self):
+ """
+ An object for managing secrets on the server. See the
+ :doc:`secrets documentation ` for full details.
+ """
+ return SecretCollection(client=self)
+
@property
def services(self):
"""
diff --git a/docker/errors.py b/docker/errors.py
index 95c462b9d2..d9b197d1a3 100644
--- a/docker/errors.py
+++ b/docker/errors.py
@@ -22,7 +22,7 @@ def create_api_error_from_http_exception(e):
cls = APIError
if response.status_code == 404:
if explanation and ('No such image' in str(explanation) or
- 'not found: does not exist or no read access'
+ 'not found: does not exist or no pull access'
in str(explanation)):
cls = ImageNotFound
else:
diff --git a/docker/models/containers.py b/docker/models/containers.py
index b1cdd8f870..b7a77875ff 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -1,5 +1,6 @@
import copy
+from ..api import APIClient
from ..errors import (ContainerError, ImageNotFound,
create_unexpected_kwargs_error)
from ..types import HostConfig
@@ -78,7 +79,7 @@ def commit(self, repository=None, tag=None, **kwargs):
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
- `Remote API documentation
+ `Engine API documentation
`_
for full details.
@@ -121,7 +122,6 @@ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command.
Default: False
- tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Stream response data. Default: False
Returns:
@@ -447,6 +447,8 @@ def run(self, image, command=None, stdout=True, stderr=False,
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
+ auto_remove (bool): enable auto-removal of the container on daemon
+ side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
@@ -584,6 +586,8 @@ def run(self, image, command=None, stdout=True, stderr=False,
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
+ storage_opt (dict): Storage driver options per container as a
+ key-value mapping.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
@@ -762,6 +766,10 @@ def list(self, all=False, before=None, filters=None, limit=-1, since=None):
since=since)
return [self.get(r['Id']) for r in resp]
+ def prune(self, filters=None):
+ return self.client.api.prune_containers(filters=filters)
+ prune.__doc__ = APIClient.prune_containers.__doc__
+
# kwargs to copy straight from run to create
RUN_CREATE_KWARGS = [
@@ -827,6 +835,7 @@ def list(self, all=False, before=None, filters=None, limit=-1, since=None):
'restart_policy',
'security_opt',
'shm_size',
+ 'storage_opt',
'sysctls',
'tmpfs',
'ulimits',
@@ -879,5 +888,15 @@ def _create_container_args(kwargs):
for p in sorted(port_bindings.keys())]
binds = create_kwargs['host_config'].get('Binds')
if binds:
- create_kwargs['volumes'] = [v.split(':')[0] for v in binds]
+ create_kwargs['volumes'] = [_host_volume_from_bind(v) for v in binds]
return create_kwargs
+
+
+def _host_volume_from_bind(bind):
+ bits = bind.split(':')
+ if len(bits) == 1:
+ return bits[0]
+ elif len(bits) == 2 and bits[1] in ('ro', 'rw'):
+ return bits[0]
+ else:
+ return bits[1]
diff --git a/docker/models/images.py b/docker/models/images.py
index 6f8f4fe273..51ee6f4ab9 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -62,10 +62,11 @@ def save(self):
Example:
- >>> image = cli.get("fedora:latest")
+ >>> image = cli.images.get("fedora:latest")
>>> resp = image.save()
>>> f = open('/tmp/fedora-latest.tar', 'w')
- >>> f.write(resp.data)
+ >>> for chunk in resp.stream():
+ >>> ...     f.write(chunk)
>>> f.close()
"""
return self.client.api.get_image(self.id)
@@ -140,6 +141,8 @@ def build(self, **kwargs):
``"0-3"``, ``"0,1"``
decode (bool): If set to ``True``, the returned stream will be
decoded into dicts on the fly. Default ``False``.
+ cache_from (list): A list of images used for build cache
+ resolution.
Returns:
(:py:class:`Image`): The built image.
@@ -160,10 +163,10 @@ def build(self, **kwargs):
return BuildError('Unknown')
event = events[-1]
if 'stream' in event:
- match = re.search(r'Successfully built ([0-9a-f]+)',
+ match = re.search(r'(Successfully built |sha256:)([0-9a-f]+)',
event.get('stream', ''))
if match:
- image_id = match.group(1)
+ image_id = match.group(2)
return self.get(image_id)
raise BuildError(event.get('error') or event)
@@ -267,3 +270,7 @@ def remove(self, *args, **kwargs):
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
+
+ def prune(self, filters=None):
+ return self.client.api.prune_images(filters=filters)
+ prune.__doc__ = APIClient.prune_images.__doc__
diff --git a/docker/models/networks.py b/docker/models/networks.py
index a80c9f5f8d..a712e9bc43 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -1,3 +1,4 @@
+from ..api import APIClient
from .containers import Container
from .resource import Model, Collection
@@ -180,3 +181,7 @@ def list(self, *args, **kwargs):
"""
resp = self.client.api.networks(*args, **kwargs)
return [self.prepare_model(item) for item in resp]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_networks(filters=filters)
+ prune.__doc__ = APIClient.prune_networks.__doc__
diff --git a/docker/models/plugins.py b/docker/models/plugins.py
new file mode 100644
index 0000000000..8b6ede95bf
--- /dev/null
+++ b/docker/models/plugins.py
@@ -0,0 +1,175 @@
+from .resource import Collection, Model
+
+
+class Plugin(Model):
+ """
+ A plugin on the server.
+ """
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ """
+ The plugin's name.
+ """
+ return self.attrs.get('Name')
+
+ @property
+ def enabled(self):
+ """
+ Whether the plugin is enabled.
+ """
+ return self.attrs.get('Enabled')
+
+ @property
+ def settings(self):
+ """
+ A dictionary representing the plugin's configuration.
+ """
+ return self.attrs.get('Settings')
+
+ def configure(self, options):
+ """
+ Update the plugin's settings.
+
+ Args:
+ options (dict): A key-value mapping of options.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.configure_plugin(self.name, options)
+ self.reload()
+
+ def disable(self):
+ """
+ Disable the plugin.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ self.client.api.disable_plugin(self.name)
+ self.reload()
+
+ def enable(self, timeout=0):
+ """
+ Enable the plugin.
+
+ Args:
+ timeout (int): Timeout in seconds. Default: 0
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ self.client.api.enable_plugin(self.name, timeout)
+ self.reload()
+
+ def push(self):
+ """
+ Push the plugin to a remote registry.
+
+ Returns:
+ A dict iterator streaming the status of the upload.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.push_plugin(self.name)
+
+ def remove(self, force=False):
+ """
+ Remove the plugin from the server.
+
+ Args:
+ force (bool): Remove even if the plugin is enabled.
+ Default: False
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.client.api.remove_plugin(self.name, force=force)
+
+
+class PluginCollection(Collection):
+ model = Plugin
+
+ def create(self, name, plugin_data_dir, gzip=False):
+ """
+ Create a new plugin.
+
+ Args:
+ name (string): The name of the plugin. The ``:latest`` tag is
+ optional, and is the default if omitted.
+ plugin_data_dir (string): Path to the plugin data directory.
+ Plugin data directory must contain the ``config.json``
+ manifest file and the ``rootfs`` directory.
+ gzip (bool): Compress the context using gzip. Default: False
+
+ Returns:
+ (:py:class:`Plugin`): The newly created plugin.
+ """
+ self.client.api.create_plugin(name, plugin_data_dir, gzip)
+ return self.get(name)
+
+ def get(self, name):
+ """
+ Gets a plugin.
+
+ Args:
+ name (str): The name of the plugin.
+
+ Returns:
+ (:py:class:`Plugin`): The plugin.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the plugin does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_plugin(name))
+
+ def install(self, remote_name, local_name=None):
+ """
+ Pull and install a plugin.
+
+ Args:
+ remote_name (string): Remote reference for the plugin to
+ install. The ``:latest`` tag is optional, and is the
+ default if omitted.
+ local_name (string): Local name for the pulled plugin.
+ The ``:latest`` tag is optional, and is the default if
+ omitted. Optional.
+
+ Returns:
+ (:py:class:`Plugin`): The installed plugin
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ privileges = self.client.api.plugin_privileges(remote_name)
+ it = self.client.api.pull_plugin(remote_name, privileges, local_name)
+ for data in it:
+ pass
+ return self.get(local_name or remote_name)
+
+ def list(self):
+ """
+ List plugins installed on the server.
+
+ Returns:
+ (list of :py:class:`Plugin`): The plugins.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.plugins()
+ return [self.prepare_model(r) for r in resp]
diff --git a/docker/models/secrets.py b/docker/models/secrets.py
new file mode 100644
index 0000000000..ca11edeb08
--- /dev/null
+++ b/docker/models/secrets.py
@@ -0,0 +1,69 @@
+from ..api import APIClient
+from .resource import Model, Collection
+
+
+class Secret(Model):
+ """A secret."""
+ id_attribute = 'ID'
+
+ def __repr__(self):
+ return "<%s: '%s'>" % (self.__class__.__name__, self.name)
+
+ @property
+ def name(self):
+ return self.attrs['Spec']['Name']
+
+ def remove(self):
+ """
+ Remove this secret.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If secret failed to remove.
+ """
+ return self.client.api.remove_secret(self.id)
+
+
+class SecretCollection(Collection):
+ """Secrets on the Docker server."""
+ model = Secret
+
+ def create(self, **kwargs):
+ obj = self.client.api.create_secret(**kwargs)
+ return self.prepare_model(obj)
+ create.__doc__ = APIClient.create_secret.__doc__
+
+ def get(self, secret_id):
+ """
+ Get a secret.
+
+ Args:
+ secret_id (str): Secret ID.
+
+ Returns:
+ (:py:class:`Secret`): The secret.
+
+ Raises:
+ :py:class:`docker.errors.NotFound`
+ If the secret does not exist.
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self.prepare_model(self.client.api.inspect_secret(secret_id))
+
+ def list(self, **kwargs):
+ """
+ List secrets. Similar to the ``docker secret ls`` command.
+
+ Args:
+ filters (dict): Server-side list filtering options.
+
+ Returns:
+ (list of :py:class:`Secret`): The secrets.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ resp = self.client.api.secrets(**kwargs)
+ return [self.prepare_model(obj) for obj in resp]
diff --git a/docker/models/services.py b/docker/models/services.py
index ef6c3e3a91..bd95b5f965 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -109,6 +109,8 @@ def create(self, image, command=None, **kwargs):
the service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
+ secrets (list of :py:class:`docker.types.SecretReference`): List
+ of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
@@ -179,6 +181,7 @@ def list(self, **kwargs):
'labels',
'mounts',
'stop_grace_period',
+ 'secrets',
]
# kwargs to copy straight over to TaskTemplate
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
index adfc51d920..d3d07ee711 100644
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -29,7 +29,7 @@ def version(self):
return self.attrs.get('Version').get('Index')
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, swarm_spec=None, **kwargs):
+ force_new_cluster=False, **kwargs):
"""
Initialize a new swarm on this Engine.
@@ -87,11 +87,11 @@ def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
)
"""
- init_kwargs = {}
- for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:
- if arg in kwargs:
- init_kwargs[arg] = kwargs[arg]
- del kwargs[arg]
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster
+ }
init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
self.client.api.init_swarm(**init_kwargs)
self.reload()
diff --git a/docker/models/volumes.py b/docker/models/volumes.py
index 5a31541260..3c2e837805 100644
--- a/docker/models/volumes.py
+++ b/docker/models/volumes.py
@@ -1,3 +1,4 @@
+from ..api import APIClient
from .resource import Model, Collection
@@ -10,21 +11,31 @@ def name(self):
"""The name of the volume."""
return self.attrs['Name']
- def remove(self):
- """Remove this volume."""
- return self.client.api.remove_volume(self.id)
+ def remove(self, force=False):
+ """
+ Remove this volume.
+
+ Args:
+ force (bool): Force removal of volumes that were already removed
+ out of band by the volume driver plugin.
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If volume failed to remove.
+ """
+ return self.client.api.remove_volume(self.id, force=force)
class VolumeCollection(Collection):
"""Volumes on the Docker server."""
model = Volume
- def create(self, name, **kwargs):
+ def create(self, name=None, **kwargs):
"""
Create a volume.
Args:
- name (str): Name of the volume
+ name (str): Name of the volume. If not specified, the engine
+ generates a name.
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
@@ -82,3 +93,7 @@ def list(self, **kwargs):
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']]
+
+ def prune(self, filters=None):
+ return self.client.api.prune_volumes(filters=filters)
+ prune.__doc__ = APIClient.prune_volumes.__doc__
diff --git a/docker/types/__init__.py b/docker/types/__init__.py
index 8e2fc17472..0e88776013 100644
--- a/docker/types/__init__.py
+++ b/docker/types/__init__.py
@@ -4,6 +4,6 @@
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ContainerSpec, DriverConfig, EndpointSpec, Mount, Resources, RestartPolicy,
- ServiceMode, TaskTemplate, UpdateConfig
+ SecretReference, ServiceMode, TaskTemplate, UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/docker/types/containers.py b/docker/types/containers.py
index 8fdecb3e3d..9a8d1574e8 100644
--- a/docker/types/containers.py
+++ b/docker/types/containers.py
@@ -117,7 +117,7 @@ def __init__(self, version, binds=None, port_bindings=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, pids_limit=None,
- isolation=None):
+ isolation=None, auto_remove=False, storage_opt=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
@@ -407,6 +407,16 @@ def __init__(self, version, binds=None, port_bindings=None,
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
+ if auto_remove:
+ if version_lt(version, '1.25'):
+ raise host_config_version_error('auto_remove', '1.25')
+ self['AutoRemove'] = auto_remove
+
+ if storage_opt is not None:
+ if version_lt(version, '1.24'):
+ raise host_config_version_error('storage_opt', '1.24')
+ self['StorageOpt'] = storage_opt
+
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
@@ -433,6 +443,7 @@ def __init__(
working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
host_config=None, mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None, healthcheck=None,
+ stop_timeout=None
):
if isinstance(command, six.string_types):
command = split_command(command)
@@ -461,6 +472,11 @@ def __init__(
'stop_signal was only introduced in API version 1.21'
)
+ if stop_timeout is not None and version_lt(version, '1.25'):
+ raise errors.InvalidVersion(
+ 'stop_timeout was only introduced in API version 1.25'
+ )
+
if healthcheck is not None and version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
@@ -579,4 +595,5 @@ def __init__(
'VolumeDriver': volume_driver,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
+ 'StopTimeout': stop_timeout
})
diff --git a/docker/types/services.py b/docker/types/services.py
index ec0fcb15f0..b903fa434b 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -2,7 +2,7 @@
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
-from ..utils import format_environment, split_command
+from ..utils import check_resource, format_environment, split_command
class TaskTemplate(dict):
@@ -21,9 +21,11 @@ class TaskTemplate(dict):
restart_policy (RestartPolicy): Specification for the restart policy
which applies to containers created as part of this service.
placement (:py:class:`list`): A list of constraints.
+ force_update (int): A counter that triggers an update even if no
+ relevant parameters have been changed.
"""
def __init__(self, container_spec, resources=None, restart_policy=None,
- placement=None, log_driver=None):
+ placement=None, log_driver=None, force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
@@ -36,6 +38,11 @@ def __init__(self, container_spec, resources=None, restart_policy=None,
if log_driver:
self['LogDriver'] = log_driver
+ if force_update is not None:
+ if not isinstance(force_update, int):
+ raise TypeError('force_update must be an integer')
+ self['ForceUpdate'] = force_update
+
@property
def container_spec(self):
return self.get('ContainerSpec')
@@ -72,9 +79,12 @@ class ContainerSpec(dict):
:py:class:`~docker.types.Mount` class for details.
stop_grace_period (int): Amount of time to wait for the container to
terminate before forcefully killing it.
+ secrets (list of :py:class:`SecretReference`): List of secrets to be
+ made available inside the containers.
"""
def __init__(self, image, command=None, args=None, env=None, workdir=None,
- user=None, labels=None, mounts=None, stop_grace_period=None):
+ user=None, labels=None, mounts=None, stop_grace_period=None,
+ secrets=None):
self['Image'] = image
if isinstance(command, six.string_types):
@@ -102,6 +112,11 @@ def __init__(self, image, command=None, args=None, env=None, workdir=None,
if stop_grace_period is not None:
self['StopGracePeriod'] = stop_grace_period
+ if secrets is not None:
+ if not isinstance(secrets, list):
+ raise TypeError('secrets must be a list')
+ self['Secrets'] = secrets
+
class Mount(dict):
"""
@@ -233,8 +248,14 @@ class UpdateConfig(dict):
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
``continue`` and ``pause``. Default: ``continue``
+ monitor (int): Amount of time to monitor each updated task for
+ failures, in nanoseconds.
+ max_failure_ratio (float): The fraction of tasks that may fail during
+ an update before the failure action is invoked, specified as a
+ floating point number between 0 and 1. Default: 0
"""
- def __init__(self, parallelism=0, delay=None, failure_action='continue'):
+ def __init__(self, parallelism=0, delay=None, failure_action='continue',
+ monitor=None, max_failure_ratio=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
@@ -244,6 +265,20 @@ def __init__(self, parallelism=0, delay=None, failure_action='continue'):
)
self['FailureAction'] = failure_action
+ if monitor is not None:
+ if not isinstance(monitor, int):
+ raise TypeError('monitor must be an integer')
+ self['Monitor'] = monitor
+
+ if max_failure_ratio is not None:
+ if not isinstance(max_failure_ratio, (float, int)):
+ raise TypeError('max_failure_ratio must be a float')
+ if max_failure_ratio > 1 or max_failure_ratio < 0:
+ raise errors.InvalidArgument(
+ 'max_failure_ratio must be a number between 0 and 1'
+ )
+ self['MaxFailureRatio'] = max_failure_ratio
+
class RestartConditionTypesEnum(object):
_values = (
@@ -383,3 +418,31 @@ def replicas(self):
if self.mode != 'replicated':
return None
return self['replicated'].get('Replicas')
+
+
+class SecretReference(dict):
+ """
+ Secret reference to be used as part of a :py:class:`ContainerSpec`.
+ Describes how a secret is made accessible inside the service's
+ containers.
+
+ Args:
+ secret_id (string): Secret's ID
+ secret_name (string): Secret's name as defined at its creation.
+ filename (string): Name of the file containing the secret. Defaults
+ to the secret's name if not specified.
+ uid (string): UID of the secret file's owner. Default: 0
+ gid (string): GID of the secret file's group. Default: 0
+ mode (int): File access mode inside the container. Default: 0o444
+ """
+ @check_resource
+ def __init__(self, secret_id, secret_name, filename=None, uid=None,
+ gid=None, mode=0o444):
+ self['SecretName'] = secret_name
+ self['SecretID'] = secret_id
+ self['File'] = {
+ 'Name': filename or secret_name,
+ 'UID': uid or '0',
+ 'GID': gid or '0',
+ 'Mode': mode
+ }
diff --git a/docker/utils/__init__.py b/docker/utils/__init__.py
index 4f6a38c4dc..b758cbd4ec 100644
--- a/docker/utils/__init__.py
+++ b/docker/utils/__init__.py
@@ -1,12 +1,13 @@
# flake8: noqa
+from .build import tar, exclude_paths
+from .decorators import check_resource, minimum_version, update_headers
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
- mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
+ mkbuildcontext, parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
create_host_config, parse_bytes, ping_registry, parse_env_file, version_lt,
version_gte, decode_json_header, split_command, create_ipam_config,
create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
- format_environment,
+ format_environment, create_archive
)
-from .decorators import check_resource, minimum_version, update_headers
diff --git a/docker/utils/build.py b/docker/utils/build.py
new file mode 100644
index 0000000000..6ba47b39fb
--- /dev/null
+++ b/docker/utils/build.py
@@ -0,0 +1,138 @@
+import os
+
+from .fnmatch import fnmatch
+from .utils import create_archive
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
+ root=root, fileobj=fileobj, gzip=gzip
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ an iterator of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ exceptions = [p for p in patterns if p.startswith('!')]
+
+ include_patterns = [p[1:] for p in exceptions]
+ include_patterns += [dockerfile, '.dockerignore']
+
+ exclude_patterns = list(set(patterns) - set(exceptions))
+
+ paths = get_paths(root, exclude_patterns, include_patterns,
+ has_exceptions=len(exceptions) > 0)
+
+ return set(paths).union(
+ # If the Dockerfile is in a subdirectory that is excluded, get_paths
+ # will not descend into it and the file will be skipped. This ensures
+ # it doesn't happen.
+ set([dockerfile])
+ if os.path.exists(os.path.join(root, dockerfile)) else set()
+ )
+
+
+def should_include(path, exclude_patterns, include_patterns):
+ """
+ Given a path, a list of exclude patterns, and a list of inclusion patterns:
+
+ 1. Returns True if the path doesn't match any exclusion pattern
+ 2. Returns False if the path matches an exclusion pattern and doesn't match
+ an inclusion pattern
+ 3. Returns true if the path matches an exclusion pattern and matches an
+ inclusion pattern
+ """
+ for pattern in exclude_patterns:
+ if match_path(path, pattern):
+ for pattern in include_patterns:
+ if match_path(path, pattern):
+ return True
+ return False
+ return True
+
+
+def should_check_directory(directory_path, exclude_patterns, include_patterns):
+ """
+ Given a directory path, a list of exclude patterns, and a list of inclusion
+ patterns:
+
+ 1. Returns True if the directory path should be included according to
+ should_include.
+ 2. Returns True if the directory path is the prefix for an inclusion
+ pattern
+ 3. Returns False otherwise
+ """
+
+ # To account for exception rules, check directories if their path is a
+ # prefix to an inclusion pattern. This logic conforms with the current
+ # docker logic (2016-10-27):
+ # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
+
+ def normalize_path(path):
+ return path.replace(os.path.sep, '/')
+
+ path_with_slash = normalize_path(directory_path) + '/'
+ possible_child_patterns = [
+ pattern for pattern in map(normalize_path, include_patterns)
+ if (pattern + '/').startswith(path_with_slash)
+ ]
+ directory_included = should_include(
+ directory_path, exclude_patterns, include_patterns
+ )
+ return directory_included or len(possible_child_patterns) > 0
+
+
+def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
+ paths = []
+
+ for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
+ parent = os.path.relpath(parent, root)
+ if parent == '.':
+ parent = ''
+
+ # Remove excluded patterns from the list of directories to traverse
+ # by mutating the dirs we're iterating over.
+ # This looks strange, but is considered the correct way to skip
+ # traversal. See https://docs.python.org/2/library/os.html#os.walk
+ dirs[:] = [
+ d for d in dirs if should_check_directory(
+ os.path.join(parent, d), exclude_patterns, include_patterns
+ )
+ ]
+
+ for path in dirs:
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
+
+ for path in files:
+ if should_include(os.path.join(parent, path),
+ exclude_patterns, include_patterns):
+ paths.append(os.path.join(parent, path))
+
+ return paths
+
+
+def match_path(path, pattern):
+ pattern = pattern.rstrip('/' + os.path.sep)
+ if pattern:
+ pattern = os.path.relpath(pattern)
+
+ if '**' not in pattern:
+ pattern_components = pattern.split(os.path.sep)
+ path_components = path.split(os.path.sep)[:len(pattern_components)]
+ else:
+ path_components = path.split(os.path.sep)
+ return fnmatch('/'.join(path_components), pattern)
diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
index 2fe880c4a5..18cde412ff 100644
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -16,7 +16,7 @@ def wrapped(self, resource_id=None, *args, **kwargs):
resource_id = resource_id.get('Id', resource_id.get('ID'))
if not resource_id:
raise errors.NullResource(
- 'image or container param is undefined'
+ 'Resource ID was not provided'
)
return f(self, resource_id, *args, **kwargs)
return wrapped
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
new file mode 100644
index 0000000000..80bdf77329
--- /dev/null
+++ b/docker/utils/fnmatch.py
@@ -0,0 +1,106 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ import os
+ name = os.path.normcase(name)
+ pat = os.path.normcase(pat)
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+
+ recursive_mode = False
+ i, n = 0, len(pat)
+ res = ''
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ recursive_mode = True
+ i = i + 1
+ res = res + '.*'
+ elif c == '?':
+ res = res + '.'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ elif recursive_mode and c == '/':
+ res = res + '/?'
+ else:
+ res = res + re.escape(c)
+ return res + '\Z(?ms)'
diff --git a/docker/utils/types.py b/docker/utils/types.py
new file mode 100644
index 0000000000..8098c470f8
--- /dev/null
+++ b/docker/utils/types.py
@@ -0,0 +1,7 @@
+# Compatibility module. See https://github.com/docker/docker-py/issues/1196
+
+import warnings
+
+from ..types import Ulimit, LogConfig # flake8: noqa
+
+warnings.warn('docker.utils.types is now docker.types', ImportWarning)
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index e12fcf00dc..d9a6d7c1ba 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -4,17 +4,16 @@
import os.path
import json
import shlex
-import sys
import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from datetime import datetime
-from fnmatch import fnmatch
import requests
import six
+from .. import constants
from .. import errors
from .. import tls
@@ -79,157 +78,48 @@ def decode_json_header(header):
return json.loads(data)
-def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False):
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
-
- root = os.path.abspath(path)
- exclude = exclude or []
-
- for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
+ if files is None:
+ files = build_file_list(root)
+ for path in files:
i = t.gettarinfo(os.path.join(root, path), arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
- if sys.platform == 'win32':
+ if constants.IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
try:
# We open the file object in binary mode for Windows support.
- f = open(os.path.join(root, path), 'rb')
+ with open(os.path.join(root, path), 'rb') as f:
+ t.addfile(i, f)
except IOError:
# When we encounter a directory the file object is set to None.
- f = None
-
- t.addfile(i, f)
-
+ t.addfile(i, None)
t.close()
fileobj.seek(0)
return fileobj
-def exclude_paths(root, patterns, dockerfile=None):
- """
- Given a root directory path and a list of .dockerignore patterns, return
- an iterator of all paths (both regular files and directories) in the root
- directory that do *not* match any of the patterns.
-
- All paths returned are relative to the root.
- """
- if dockerfile is None:
- dockerfile = 'Dockerfile'
-
- exceptions = [p for p in patterns if p.startswith('!')]
-
- include_patterns = [p[1:] for p in exceptions]
- include_patterns += [dockerfile, '.dockerignore']
-
- exclude_patterns = list(set(patterns) - set(exceptions))
-
- paths = get_paths(root, exclude_patterns, include_patterns,
- has_exceptions=len(exceptions) > 0)
-
- return set(paths).union(
- # If the Dockerfile is in a subdirectory that is excluded, get_paths
- # will not descend into it and the file will be skipped. This ensures
- # it doesn't happen.
- set([dockerfile])
- if os.path.exists(os.path.join(root, dockerfile)) else set()
- )
-
-
-def should_include(path, exclude_patterns, include_patterns):
- """
- Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
- 1. Returns True if the path doesn't match any exclusion pattern
- 2. Returns False if the path matches an exclusion pattern and doesn't match
- an inclusion pattern
- 3. Returns true if the path matches an exclusion pattern and matches an
- inclusion pattern
- """
- for pattern in exclude_patterns:
- if match_path(path, pattern):
- for pattern in include_patterns:
- if match_path(path, pattern):
- return True
- return False
- return True
-
-
-def should_check_directory(directory_path, exclude_patterns, include_patterns):
- """
- Given a directory path, a list of exclude patterns, and a list of inclusion
- patterns:
-
- 1. Returns True if the directory path should be included according to
- should_include.
- 2. Returns True if the directory path is the prefix for an inclusion
- pattern
- 3. Returns False otherwise
- """
-
- # To account for exception rules, check directories if their path is a
- # a prefix to an inclusion pattern. This logic conforms with the current
- # docker logic (2016-10-27):
- # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
-
- def normalize_path(path):
- return path.replace(os.path.sep, '/')
-
- path_with_slash = normalize_path(directory_path) + '/'
- possible_child_patterns = [
- pattern for pattern in map(normalize_path, include_patterns)
- if (pattern + '/').startswith(path_with_slash)
- ]
- directory_included = should_include(
- directory_path, exclude_patterns, include_patterns
- )
- return directory_included or len(possible_child_patterns) > 0
-
-
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
- paths = []
-
- for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
- parent = os.path.relpath(parent, root)
- if parent == '.':
- parent = ''
-
- # Remove excluded patterns from the list of directories to traverse
- # by mutating the dirs we're iterating over.
- # This looks strange, but is considered the correct way to skip
- # traversal. See https://docs.python.org/2/library/os.html#os.walk
- dirs[:] = [
- d for d in dirs if should_check_directory(
- os.path.join(parent, d), exclude_patterns, include_patterns
- )
- ]
-
- for path in dirs:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- for path in files:
- if should_include(os.path.join(parent, path),
- exclude_patterns, include_patterns):
- paths.append(os.path.join(parent, path))
-
- return paths
-
-
-def match_path(path, pattern):
- pattern = pattern.rstrip('/' + os.path.sep)
- if pattern:
- pattern = os.path.relpath(pattern)
-
- pattern_components = pattern.split(os.path.sep)
- path_components = path.split(os.path.sep)[:len(pattern_components)]
- return fnmatch('/'.join(path_components), pattern)
-
-
def compare_version(v1, v2):
"""Compare docker versions
diff --git a/docker/version.py b/docker/version.py
index b0c244af3e..491566cd9a 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "2.0.2"
+version = "2.1.0"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/docs/api.rst b/docs/api.rst
index b5c1e92998..52cd26b2ca 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -87,6 +87,26 @@ Services
:members:
:undoc-members:
+Plugins
+-------
+
+.. py:module:: docker.api.plugin
+
+.. rst-class:: hide-signature
+.. autoclass:: PluginApiMixin
+ :members:
+ :undoc-members:
+
+Secrets
+-------
+
+.. py:module:: docker.api.secret
+
+.. rst-class:: hide-signature
+.. autoclass:: SecretApiMixin
+ :members:
+ :undoc-members:
+
The Docker daemon
-----------------
@@ -110,6 +130,7 @@ Configuration types
.. autoclass:: Mount
.. autoclass:: Resources
.. autoclass:: RestartPolicy
+.. autoclass:: SecretReference
.. autoclass:: ServiceMode
.. autoclass:: TaskTemplate
.. autoclass:: UpdateConfig
diff --git a/docs/change-log.md b/docs/change-log.md
index 8b6d859ea8..68b27b8bbb 100644
--- a/docs/change-log.md
+++ b/docs/change-log.md
@@ -1,6 +1,57 @@
Change log
==========
+2.1.0
+-----
+
+[List of PRs / issues for this release](https://github.com/docker/docker-py/milestone/27?closed=1)
+
+### Features
+
+* Added the following pruning methods:
+ * In `APIClient`: `prune_containers`, `prune_images`, `prune_networks`,
+ `prune_volumes`
+ * In `DockerClient`: `containers.prune`, `images.prune`, `networks.prune`,
+ `volumes.prune`
+* Added support for the plugins API:
+ * In `APIClient`: `configure_plugin`, `create_plugin`, `disable_plugin`,
+ `enable_plugin`, `inspect_plugin`, `pull_plugin`, `plugins`,
+ `plugin_privileges`, `push_plugin`, `remove_plugin`
+ * In `DockerClient`: `plugins.create`, `plugins.get`, `plugins.install`,
+ `plugins.list`, and the `Plugin` model.
+* Added support for the secrets API:
+ * In `APIClient`: `create_secret`, `inspect_secret`, `remove_secret`,
+ `secrets`
+ * In `DockerClient`: `secrets.create`, `secrets.get`, `secrets.list` and
+ the `Secret` model.
+ * Added `secrets` parameter to `ContainerSpec`. Each item in the `secrets`
+ list must be a `docker.types.SecretReference` instance.
+* Added support for `cache_from` in `APIClient.build` and
+ `DockerClient.images.build`.
+* Added support for `auto_remove` and `storage_opt` in
+ `APIClient.create_host_config` and `DockerClient.containers.run`
+* Added support for `stop_timeout` in `APIClient.create_container` and
+ `DockerClient.containers.run`
+* Added support for the `force` parameter in `APIClient.remove_volume` and
+ `Volume.remove`
+* Added support for `max_failure_ratio` and `monitor` in `UpdateConfig`
+* Added support for `force_update` in `TaskTemplate`
+* Made `name` parameter optional in `APIClient.create_volume` and
+ `DockerClient.volumes.create`
+
+### Bugfixes
+
+* Fixed a bug where building from a directory containing socket-type files
+ would raise an unexpected `AttributeError`.
+* Fixed an issue that was preventing the `DockerClient.swarm.init` method
+ from taking into account arguments passed to it.
+* `Image.tag` now correctly returns a boolean value upon completion.
+* Fixed several issues related to passing `volumes` in
+ `DockerClient.containers.run`
+* Fixed an issue where `DockerClient.images.build` wouldn't return an `Image`
+ object even when the build was successful
+
+
2.0.2
-----
diff --git a/docs/client.rst b/docs/client.rst
index 63bce2c875..9d9edeb1b2 100644
--- a/docs/client.rst
+++ b/docs/client.rst
@@ -19,6 +19,8 @@ Client reference
.. autoattribute:: images
.. autoattribute:: networks
.. autoattribute:: nodes
+ .. autoattribute:: plugins
+ .. autoattribute:: secrets
.. autoattribute:: services
.. autoattribute:: swarm
.. autoattribute:: volumes
diff --git a/docs/conf.py b/docs/conf.py
index 4901279619..3e17678a83 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -139,7 +139,7 @@
# documentation.
#
html_theme_options = {
- 'description': 'A Python library for the Docker Remote API',
+ 'description': 'A Python library for the Docker Engine API',
'fixed_sidebar': True,
}
diff --git a/docs/containers.rst b/docs/containers.rst
index eb51ae4c97..9b27a306b8 100644
--- a/docs/containers.rst
+++ b/docs/containers.rst
@@ -14,6 +14,7 @@ Methods available on ``client.containers``:
.. automethod:: create(image, command=None, **kwargs)
.. automethod:: get(id_or_name)
.. automethod:: list(**kwargs)
+ .. automethod:: prune
Container objects
-----------------
diff --git a/docs/images.rst b/docs/images.rst
index 7572c2d6a5..25fcffc83d 100644
--- a/docs/images.rst
+++ b/docs/images.rst
@@ -14,6 +14,7 @@ Methods available on ``client.images``:
.. automethod:: get
.. automethod:: list(**kwargs)
.. automethod:: load
+ .. automethod:: prune
.. automethod:: pull
.. automethod:: push
.. automethod:: remove
diff --git a/docs/index.rst b/docs/index.rst
index 9f484cdbaa..9113bffcc8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,14 +1,14 @@
Docker SDK for Python
=====================
-A Python library for the Docker Remote API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+A Python library for the Docker Engine API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
-For more information about the Remote API, `see its documentation <https://docs.docker.com/engine/reference/api/docker_remote_api/>`_.
+For more information about the Engine API, `see its documentation <https://docs.docker.com/engine/api/>`_.
Installation
------------
-The latest stable version `is available on PyPi <https://pypi.python.org/pypi/docker/>`_. Either add ``docker`` to your ``requirements.txt`` file or install with pip::
+The latest stable version `is available on PyPI <https://pypi.python.org/pypi/docker/>`_. Either add ``docker`` to your ``requirements.txt`` file or install with pip::
pip install docker
@@ -84,6 +84,8 @@ That's just a taste of what you can do with the Docker SDK for Python. For more,
images
networks
nodes
+ plugins
+ secrets
services
swarm
volumes
diff --git a/docs/networks.rst b/docs/networks.rst
index f6de38bd71..b585f0bdaa 100644
--- a/docs/networks.rst
+++ b/docs/networks.rst
@@ -13,6 +13,7 @@ Methods available on ``client.networks``:
.. automethod:: create
.. automethod:: get
.. automethod:: list
+ .. automethod:: prune
Network objects
-----------------
diff --git a/docs/plugins.rst b/docs/plugins.rst
new file mode 100644
index 0000000000..a171b2bdad
--- /dev/null
+++ b/docs/plugins.rst
@@ -0,0 +1,37 @@
+Plugins
+=======
+
+.. py:module:: docker.models.plugins
+
+Manage plugins on the server.
+
+Methods available on ``client.plugins``:
+
+.. rst-class:: hide-signature
+.. py:class:: PluginCollection
+
+ .. automethod:: get
+ .. automethod:: install
+ .. automethod:: list
+
+
+Plugin objects
+--------------
+
+.. autoclass:: Plugin()
+
+ .. autoattribute:: id
+ .. autoattribute:: short_id
+ .. autoattribute:: name
+ .. autoattribute:: enabled
+ .. autoattribute:: settings
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+ .. automethod:: configure
+ .. automethod:: disable
+ .. automethod:: enable
+ .. automethod:: reload
+ .. automethod:: push
+ .. automethod:: remove
diff --git a/docs/secrets.rst b/docs/secrets.rst
new file mode 100644
index 0000000000..49e149847d
--- /dev/null
+++ b/docs/secrets.rst
@@ -0,0 +1,29 @@
+Secrets
+=======
+
+.. py:module:: docker.models.secrets
+
+Manage secrets on the server.
+
+Methods available on ``client.secrets``:
+
+.. rst-class:: hide-signature
+.. py:class:: SecretCollection
+
+ .. automethod:: create
+ .. automethod:: get
+ .. automethod:: list
+
+
+Secret objects
+--------------
+
+.. autoclass:: Secret()
+
+ .. autoattribute:: id
+ .. autoattribute:: name
+ .. py:attribute:: attrs
+
+ The raw representation of this object from the server.
+
+ .. automethod:: remove
diff --git a/docs/volumes.rst b/docs/volumes.rst
index 8c0574b562..fcd022a574 100644
--- a/docs/volumes.rst
+++ b/docs/volumes.rst
@@ -13,6 +13,7 @@ Methods available on ``client.volumes``:
.. automethod:: create
.. automethod:: get
.. automethod:: list
+ .. automethod:: prune
Volume objects
--------------
diff --git a/tests/helpers.py b/tests/helpers.py
index 1e42363144..e8ba4d6bf9 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -43,10 +43,12 @@ def untar_file(tardata, filename):
def requires_api_version(version):
+ test_version = os.environ.get(
+ 'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
+ )
+
return pytest.mark.skipif(
- docker.utils.version_lt(
- docker.constants.DEFAULT_DOCKER_API_VERSION, version
- ),
+ docker.utils.version_lt(test_version, version),
reason="API version is too low (< {0})".format(version)
)
@@ -68,7 +70,9 @@ def force_leave_swarm(client):
occasionally throws "context deadline exceeded" errors when leaving."""
while True:
try:
- return client.swarm.leave(force=True)
+ if isinstance(client, docker.DockerClient):
+ return client.swarm.leave(force=True)
+ return client.leave_swarm(force=True) # elif APIClient
except docker.errors.APIError as e:
if e.explanation == "context deadline exceeded":
continue
diff --git a/tests/integration/api_build_test.py b/tests/integration/api_build_test.py
index 3dac0e932d..fe5d994dd6 100644
--- a/tests/integration/api_build_test.py
+++ b/tests/integration/api_build_test.py
@@ -3,12 +3,12 @@
import shutil
import tempfile
-import six
-
from docker import errors
-from ..helpers import requires_api_version
+import six
+
from .base import BaseAPIIntegrationTest
+from ..helpers import requires_api_version
class BuildTest(BaseAPIIntegrationTest):
@@ -153,6 +153,42 @@ def test_build_labels(self):
info = self.client.inspect_image('labels')
self.assertEqual(info['Config']['Labels'], labels)
+ @requires_api_version('1.25')
+ def test_build_with_cache_from(self):
+ script = io.BytesIO('\n'.join([
+ 'FROM busybox',
+ 'ENV FOO=bar',
+ 'RUN touch baz',
+ 'RUN touch bax',
+ ]).encode('ascii'))
+
+ stream = self.client.build(fileobj=script, tag='build1')
+ self.tmp_imgs.append('build1')
+ for chunk in stream:
+ pass
+
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['build1'],
+ decode=True
+ )
+ self.tmp_imgs.append('build2')
+ counter = 0
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 3
+ self.client.remove_image('build2')
+
+ counter = 0
+ stream = self.client.build(
+ fileobj=script, tag='build2', cache_from=['nosuchtag'],
+ decode=True
+ )
+ for chunk in stream:
+ if 'Using cache' in chunk.get('stream', ''):
+ counter += 1
+ assert counter == 0
+
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
diff --git a/tests/integration/api_client_test.py b/tests/integration/api_client_test.py
index dab8ddf382..02bb435ada 100644
--- a/tests/integration/api_client_test.py
+++ b/tests/integration/api_client_test.py
@@ -24,14 +24,6 @@ def test_info(self):
self.assertIn('Images', res)
self.assertIn('Debug', res)
- def test_search(self):
- client = docker.APIClient(timeout=10, **kwargs_from_env())
- res = client.search('busybox')
- self.assertTrue(len(res) >= 1)
- base_img = [x for x in res if x['name'] == 'busybox']
- self.assertEqual(len(base_img), 1)
- self.assertIn('description', base_img[0])
-
class LinkTest(BaseAPIIntegrationTest):
def test_remove_link(self):
@@ -126,8 +118,11 @@ def test_client_init(self):
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
- self.client = docker.api.APIClient(base_url='http://192.168.10.2:4243',
- timeout=self.timeout)
+ self.client = docker.api.APIClient(
+ version=docker.constants.MINIMUM_DOCKER_API_VERSION,
+ base_url='http://192.168.10.2:4243',
+ timeout=self.timeout
+ )
def test_timeout(self):
start = time.time()
diff --git a/tests/integration/api_container_test.py b/tests/integration/api_container_test.py
index bebadb71b5..07097ed863 100644
--- a/tests/integration/api_container_test.py
+++ b/tests/integration/api_container_test.py
@@ -6,12 +6,14 @@
from docker.constants import IS_WINDOWS_PLATFORM
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
+
import pytest
+
import six
-from ..helpers import requires_api_version
+from .base import BUSYBOX, BaseAPIIntegrationTest
from .. import helpers
-from .base import BaseAPIIntegrationTest, BUSYBOX
+from ..helpers import requires_api_version
class ListContainersTest(BaseAPIIntegrationTest):
@@ -401,6 +403,42 @@ def test_create_with_isolation(self):
config = self.client.inspect_container(container)
assert config['HostConfig']['Isolation'] == 'default'
+ @requires_api_version('1.25')
+ def test_create_with_auto_remove(self):
+ host_config = self.client.create_host_config(
+ auto_remove=True
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['AutoRemove'] is True
+
+ @requires_api_version('1.25')
+ def test_create_with_stop_timeout(self):
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], stop_timeout=25
+ )
+ self.tmp_containers.append(container['Id'])
+ config = self.client.inspect_container(container)
+ assert config['Config']['StopTimeout'] == 25
+
+ @requires_api_version('1.24')
+ @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ def test_create_with_storage_opt(self):
+ host_config = self.client.create_host_config(
+ storage_opt={'size': '120G'}
+ )
+ container = self.client.create_container(
+ BUSYBOX, ['echo', 'test'], host_config=host_config
+ )
+ self.tmp_containers.append(container)
+ config = self.client.inspect_container(container)
+ assert config['HostConfig']['StorageOpt'] == {
+ 'size': '120G'
+ }
+
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
@@ -1073,6 +1111,20 @@ def test_pause_unpause(self):
self.assertEqual(state['Paused'], False)
+class PruneTest(BaseAPIIntegrationTest):
+ @requires_api_version('1.25')
+ def test_prune_containers(self):
+ container1 = self.client.create_container(BUSYBOX, ['echo', 'hello'])
+ container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.client.start(container1)
+ self.client.start(container2)
+ self.client.wait(container1)
+ result = self.client.prune_containers()
+ assert container1['Id'] in result['ContainersDeleted']
+ assert result['SpaceReclaimed'] > 0
+ assert container2['Id'] not in result['ContainersDeleted']
+
+
class GetContainerStatsTest(BaseAPIIntegrationTest):
@requires_api_version('1.19')
def test_get_container_stats_no_stream(self):
diff --git a/tests/integration/api_image_test.py b/tests/integration/api_image_test.py
index 135f115b1c..11146a8a00 100644
--- a/tests/integration/api_image_test.py
+++ b/tests/integration/api_image_test.py
@@ -14,6 +14,7 @@
import docker
+from ..helpers import requires_api_version
from .base import BaseAPIIntegrationTest, BUSYBOX
@@ -285,3 +286,32 @@ def test_import_from_url(self):
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
+
+
+@requires_api_version('1.25')
+class PruneImagesTest(BaseAPIIntegrationTest):
+ def test_prune_images(self):
+ try:
+ self.client.remove_image('hello-world')
+ except docker.errors.APIError:
+ pass
+
+ # Ensure busybox does not get pruned
+ ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
+ self.tmp_containers.append(ctnr)
+
+ self.client.pull('hello-world')
+ self.tmp_imgs.append('hello-world')
+ img_id = self.client.inspect_image('hello-world')['Id']
+ result = self.client.prune_images()
+ assert img_id not in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
+ result = self.client.prune_images({'dangling': False})
+ assert result['SpaceReclaimed'] > 0
+ assert 'hello-world:latest' in [
+ img.get('Untagged') for img in result['ImagesDeleted']
+ ]
+ assert img_id in [
+ img.get('Deleted') for img in result['ImagesDeleted']
+ ]
diff --git a/tests/integration/api_network_test.py b/tests/integration/api_network_test.py
index 2c297a00a6..b3ae512080 100644
--- a/tests/integration/api_network_test.py
+++ b/tests/integration/api_network_test.py
@@ -3,7 +3,7 @@
import pytest
from ..helpers import random_name, requires_api_version
-from .base import BaseAPIIntegrationTest
+from .base import BaseAPIIntegrationTest, BUSYBOX
class TestNetworks(BaseAPIIntegrationTest):
@@ -88,19 +88,17 @@ def test_create_network_with_host_driver_fails(self):
@requires_api_version('1.21')
def test_remove_network(self):
- initial_size = len(self.client.networks())
-
net_name, net_id = self.create_network()
- self.assertEqual(len(self.client.networks()), initial_size + 1)
+ assert net_name in [n['Name'] for n in self.client.networks()]
self.client.remove_network(net_id)
- self.assertEqual(len(self.client.networks()), initial_size)
+ assert net_name not in [n['Name'] for n in self.client.networks()]
@requires_api_version('1.21')
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -128,7 +126,7 @@ def test_connect_and_disconnect_container(self):
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -155,7 +153,7 @@ def test_connect_and_force_disconnect_container(self):
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
- container = self.client.create_container('busybox', 'top')
+ container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
@@ -173,7 +171,7 @@ def test_connect_on_container_create(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
@@ -194,7 +192,7 @@ def test_create_with_aliases(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
- image='busybox',
+ image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
@@ -224,7 +222,7 @@ def test_create_with_ipv4_address(self):
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -253,7 +251,7 @@ def test_create_with_ipv6_address(self):
),
)
container = self.client.create_container(
- image='busybox', command='top',
+ image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
@@ -276,7 +274,7 @@ def test_create_with_ipv6_address(self):
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
- 'busybox', 'top',
+ BUSYBOX, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
@@ -453,3 +451,9 @@ def test_create_network_attachable(self):
_, net_id = self.create_network(driver='overlay', attachable=True)
net = self.client.inspect_network(net_id)
assert net['Attachable'] is True
+
+ @requires_api_version('1.25')
+ def test_prune_networks(self):
+ net_name, _ = self.create_network()
+ result = self.client.prune_networks()
+ assert net_name in result['NetworksDeleted']
diff --git a/tests/integration/api_plugin_test.py b/tests/integration/api_plugin_test.py
new file mode 100644
index 0000000000..e90a1088fc
--- /dev/null
+++ b/tests/integration/api_plugin_test.py
@@ -0,0 +1,135 @@
+import os
+
+import docker
+import pytest
+
+from .base import BaseAPIIntegrationTest, TEST_API_VERSION
+from ..helpers import requires_api_version
+
+SSHFS = 'vieux/sshfs:latest'
+
+
+@requires_api_version('1.25')
+class PluginTest(BaseAPIIntegrationTest):
+ @classmethod
+ def teardown_class(cls):
+ c = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60,
+ **docker.utils.kwargs_from_env()
+ )
+ try:
+ c.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def teardown_method(self, method):
+ try:
+ self.client.disable_plugin(SSHFS)
+ except docker.errors.APIError:
+ pass
+
+ for p in self.tmp_plugins:
+ try:
+ self.client.remove_plugin(p, force=True)
+ except docker.errors.APIError:
+ pass
+
+ def ensure_plugin_installed(self, plugin_name):
+ try:
+ return self.client.inspect_plugin(plugin_name)
+ except docker.errors.NotFound:
+ prv = self.client.plugin_privileges(plugin_name)
+ for d in self.client.pull_plugin(plugin_name, prv):
+ pass
+ return self.client.inspect_plugin(plugin_name)
+
+ def test_enable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ with pytest.raises(docker.errors.APIError):
+ self.client.enable_plugin(SSHFS)
+
+ def test_disable_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.enable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is True
+ self.client.disable_plugin(SSHFS)
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert pl_data['Enabled'] is False
+ with pytest.raises(docker.errors.APIError):
+ self.client.disable_plugin(SSHFS)
+
+ def test_inspect_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.inspect_plugin(SSHFS)
+ assert 'Config' in data
+ assert 'Name' in data
+ assert data['Name'] == SSHFS
+
+ def test_plugin_privileges(self):
+ prv = self.client.plugin_privileges(SSHFS)
+ assert isinstance(prv, list)
+ for item in prv:
+ assert 'Name' in item
+ assert 'Value' in item
+ assert 'Description' in item
+
+ def test_list_plugins(self):
+ self.ensure_plugin_installed(SSHFS)
+ data = self.client.plugins()
+ assert len(data) > 0
+ plugin = [p for p in data if p['Name'] == SSHFS][0]
+ assert 'Config' in plugin
+
+ def test_configure_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ self.client.configure_plugin(SSHFS, {
+ 'DEBUG': '1'
+ })
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'Env' in pl_data['Settings']
+ assert 'DEBUG=1' in pl_data['Settings']['Env']
+
+ self.client.configure_plugin(SSHFS, ['DEBUG=0'])
+ pl_data = self.client.inspect_plugin(SSHFS)
+ assert 'DEBUG=0' in pl_data['Settings']['Env']
+
+ def test_remove_plugin(self):
+ pl_data = self.ensure_plugin_installed(SSHFS)
+ assert pl_data['Enabled'] is False
+ assert self.client.remove_plugin(SSHFS) is True
+
+ def test_force_remove_plugin(self):
+ self.ensure_plugin_installed(SSHFS)
+ self.client.enable_plugin(SSHFS)
+ assert self.client.inspect_plugin(SSHFS)['Enabled'] is True
+ assert self.client.remove_plugin(SSHFS, force=True) is True
+
+ def test_install_plugin(self):
+ try:
+ self.client.remove_plugin(SSHFS, force=True)
+ except docker.errors.APIError:
+ pass
+
+ prv = self.client.plugin_privileges(SSHFS)
+ logs = [d for d in self.client.pull_plugin(SSHFS, prv)]
+ assert filter(lambda x: x['status'] == 'Download complete', logs)
+ assert self.client.inspect_plugin(SSHFS)
+ assert self.client.enable_plugin(SSHFS)
+
+ def test_create_plugin(self):
+ plugin_data_dir = os.path.join(
+ os.path.dirname(__file__), 'testdata/dummy-plugin'
+ )
+ assert self.client.create_plugin(
+ 'docker-sdk-py/dummy', plugin_data_dir
+ )
+ self.tmp_plugins.append('docker-sdk-py/dummy')
+ data = self.client.inspect_plugin('docker-sdk-py/dummy')
+ assert data['Config']['Entrypoint'] == ['/dummy']
diff --git a/tests/integration/api_secret_test.py b/tests/integration/api_secret_test.py
new file mode 100644
index 0000000000..dcd880f49c
--- /dev/null
+++ b/tests/integration/api_secret_test.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+import docker
+import pytest
+
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest
+
+
+@requires_api_version('1.25')
+class SecretAPITest(BaseAPIIntegrationTest):
+ def setUp(self):
+ super(SecretAPITest, self).setUp()
+ self.init_swarm()
+
+ def tearDown(self):
+ super(SecretAPITest, self).tearDown()
+ force_leave_swarm(self.client)
+
+ def test_create_secret(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_create_secret_unicode_data(self):
+ secret_id = self.client.create_secret(
+ 'favorite_character', u'いざよいさくや'
+ )
+ self.tmp_secrets.append(secret_id)
+ assert 'ID' in secret_id
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == 'favorite_character'
+
+ def test_inspect_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+ data = self.client.inspect_secret(secret_id)
+ assert data['Spec']['Name'] == secret_name
+ assert 'ID' in data
+ assert 'Version' in data
+
+ def test_remove_secret(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ assert self.client.remove_secret(secret_id)
+ with pytest.raises(docker.errors.NotFound):
+ self.client.inspect_secret(secret_id)
+
+ def test_list_secrets(self):
+ secret_name = 'favorite_character'
+ secret_id = self.client.create_secret(
+ secret_name, 'sakuya izayoi'
+ )
+ self.tmp_secrets.append(secret_id)
+
+ data = self.client.secrets(filters={'names': ['favorite_character']})
+ assert len(data) == 1
+ assert data[0]['ID'] == secret_id['ID']
diff --git a/tests/integration/api_service_test.py b/tests/integration/api_service_test.py
index 77d7d28f7e..1dd295dfb5 100644
--- a/tests/integration/api_service_test.py
+++ b/tests/integration/api_service_test.py
@@ -1,15 +1,18 @@
+# -*- coding: utf-8 -*-
+
import random
+import time
import docker
-from ..helpers import requires_api_version
-from .base import BaseAPIIntegrationTest
+from ..helpers import force_leave_swarm, requires_api_version
+from .base import BaseAPIIntegrationTest, BUSYBOX
class ServiceTest(BaseAPIIntegrationTest):
def setUp(self):
super(ServiceTest, self).setUp()
- self.client.leave_swarm(force=True)
+ force_leave_swarm(self.client)
self.init_swarm()
def tearDown(self):
@@ -19,11 +22,26 @@ def tearDown(self):
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
- self.client.leave_swarm(force=True)
+ force_leave_swarm(self.client)
def get_service_name(self):
return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
+ def get_service_container(self, service_name, attempts=20, interval=0.5):
+ # There is some delay between the service's creation and the creation
+ # of the service's containers. This method deals with the uncertainty
+ # when trying to retrieve the container associated with a service.
+ while True:
+ containers = self.client.containers(
+ filters={'name': [service_name]}, quiet=True
+ )
+ if len(containers) > 0:
+ return containers[0]
+ attempts -= 1
+ if attempts <= 0:
+ return None
+ time.sleep(interval)
+
def create_simple_service(self, name=None):
if name:
name = 'dockerpytest_{0}'.format(name)
@@ -31,7 +49,7 @@ def create_simple_service(self, name=None):
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
+ BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
return name, self.client.create_service(task_tmpl, name=name)
@@ -81,7 +99,7 @@ def test_create_service_simple(self):
def test_create_service_custom_log_driver(self):
container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
+ BUSYBOX, ['echo', 'hello']
)
log_cfg = docker.types.DriverConfig('none')
task_tmpl = docker.types.TaskTemplate(
@@ -99,7 +117,7 @@ def test_create_service_custom_log_driver(self):
def test_create_service_with_volume_mount(self):
vol_name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
- 'busybox', ['ls'],
+ BUSYBOX, ['ls'],
mounts=[
docker.types.Mount(target='/test', source=vol_name)
]
@@ -119,7 +137,7 @@ def test_create_service_with_volume_mount(self):
assert mount['Type'] == 'volume'
def test_create_service_with_resources_constraints(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
resources = docker.types.Resources(
cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
@@ -139,7 +157,7 @@ def test_create_service_with_resources_constraints(self):
]
def test_create_service_with_update_config(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
@@ -155,8 +173,25 @@ def test_create_service_with_update_config(self):
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
- def test_create_service_with_restart_policy(self):
+ @requires_api_version('1.25')
+ def test_create_service_with_update_config_monitor(self):
container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ update_config = docker.types.UpdateConfig(
+ monitor=300000000, max_failure_ratio=0.4
+ )
+ name = self.get_service_name()
+ svc_id = self.client.create_service(
+ task_tmpl, update_config=update_config, name=name
+ )
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'UpdateConfig' in svc_info['Spec']
+ uc = svc_info['Spec']['UpdateConfig']
+ assert update_config['Monitor'] == uc['Monitor']
+ assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
+
+ def test_create_service_with_restart_policy(self):
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
policy = docker.types.RestartPolicy(
docker.types.RestartPolicy.condition_types.ANY,
delay=5, max_attempts=5
@@ -179,7 +214,7 @@ def test_create_service_with_custom_networks(self):
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
@@ -195,7 +230,7 @@ def test_create_service_with_custom_networks(self):
def test_create_service_with_placement(self):
node_id = self.client.nodes()[0]['ID']
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=['node.id=={}'.format(node_id)]
)
@@ -207,7 +242,7 @@ def test_create_service_with_placement(self):
{'Constraints': ['node.id=={}'.format(node_id)]})
def test_create_service_with_endpoint_spec(self):
- container_spec = docker.types.ContainerSpec('busybox', ['true'])
+ container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
@@ -238,7 +273,7 @@ def test_create_service_with_endpoint_spec(self):
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
- 'busybox', ['true'], env={'DOCKER_PY_TEST': 1}
+ BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
@@ -254,7 +289,7 @@ def test_create_service_with_env(self):
def test_create_service_global_mode(self):
container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
+ BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -267,7 +302,7 @@ def test_create_service_global_mode(self):
def test_create_service_replicated_mode(self):
container_spec = docker.types.ContainerSpec(
- 'busybox', ['echo', 'hello']
+ BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
@@ -279,3 +314,76 @@ def test_create_service_replicated_mode(self):
assert 'Mode' in svc_info['Spec']
assert 'Replicated' in svc_info['Spec']['Mode']
assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
+
+ @requires_api_version('1.25')
+ def test_update_service_force_update(self):
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['echo', 'hello']
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'TaskTemplate' in svc_info['Spec']
+ assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
+ version_index = svc_info['Version']['Index']
+
+ task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
+ self.client.update_service(name, version_index, task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ new_index = svc_info['Version']['Index']
+ assert new_index > version_index
+ assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
+
+ @requires_api_version('1.25')
+ def test_create_service_with_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = b'phantasmagoria of flower view'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['top'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ assert self.client.exec_start(exec_id) == secret_data
+
+ @requires_api_version('1.25')
+ def test_create_service_with_unicode_secret(self):
+ secret_name = 'favorite_touhou'
+ secret_data = u'東方花映塚'
+ secret_id = self.client.create_secret(secret_name, secret_data)
+ self.tmp_secrets.append(secret_id)
+ secret_ref = docker.types.SecretReference(secret_id, secret_name)
+ container_spec = docker.types.ContainerSpec(
+ BUSYBOX, ['top'], secrets=[secret_ref]
+ )
+ task_tmpl = docker.types.TaskTemplate(container_spec)
+ name = self.get_service_name()
+ svc_id = self.client.create_service(task_tmpl, name=name)
+ svc_info = self.client.inspect_service(svc_id)
+ assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
+ secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
+ assert secrets[0] == secret_ref
+
+ container = self.get_service_container(name)
+ assert container is not None
+ exec_id = self.client.exec_create(
+ container, 'cat /run/secrets/{0}'.format(secret_name)
+ )
+ container_secret = self.client.exec_start(exec_id)
+ container_secret = container_secret.decode('utf-8')
+ assert container_secret == secret_data
diff --git a/tests/integration/api_swarm_test.py b/tests/integration/api_swarm_test.py
index a8f439c8b5..d06cac21bd 100644
--- a/tests/integration/api_swarm_test.py
+++ b/tests/integration/api_swarm_test.py
@@ -2,18 +2,18 @@
import docker
import pytest
-from ..helpers import requires_api_version
+from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
- self.client.leave_swarm(force=True)
+ force_leave_swarm(self.client)
def tearDown(self):
super(SwarmTest, self).tearDown()
- self.client.leave_swarm(force=True)
+ force_leave_swarm(self.client)
@requires_api_version('1.24')
def test_init_swarm_simple(self):
diff --git a/tests/integration/api_volume_test.py b/tests/integration/api_volume_test.py
index bc97f462e5..5a4bb1e0bc 100644
--- a/tests/integration/api_volume_test.py
+++ b/tests/integration/api_volume_test.py
@@ -49,6 +49,21 @@ def test_remove_volume(self):
self.client.create_volume(name)
self.client.remove_volume(name)
+ @requires_api_version('1.25')
+ def test_force_remove_volume(self):
+ name = 'shootthebullet'
+ self.tmp_volumes.append(name)
+ self.client.create_volume(name)
+ self.client.remove_volume(name, force=True)
+
+ @requires_api_version('1.25')
+ def test_prune_volumes(self):
+ name = 'hopelessmasquerade'
+ self.client.create_volume(name)
+ self.tmp_volumes.append(name)
+ result = self.client.prune_volumes()
+ assert name in result['VolumesDeleted']
+
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
diff --git a/tests/integration/base.py b/tests/integration/base.py
index 4a41e6b81a..3c01689ab3 100644
--- a/tests/integration/base.py
+++ b/tests/integration/base.py
@@ -1,3 +1,4 @@
+import os
import shutil
import unittest
@@ -8,6 +9,7 @@
from .. import helpers
BUSYBOX = 'busybox:buildroot-2014.02'
+TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
class BaseIntegrationTest(unittest.TestCase):
@@ -25,9 +27,11 @@ def setUp(self):
self.tmp_folders = []
self.tmp_volumes = []
self.tmp_networks = []
+ self.tmp_plugins = []
+ self.tmp_secrets = []
def tearDown(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
for img in self.tmp_imgs:
try:
client.api.remove_image(img)
@@ -49,6 +53,12 @@ def tearDown(self):
except docker.errors.APIError:
pass
+ for secret in self.tmp_secrets:
+ try:
+ client.api.remove_secret(secret)
+ except docker.errors.APIError:
+ pass
+
for folder in self.tmp_folders:
shutil.rmtree(folder)
@@ -61,7 +71,13 @@ class BaseAPIIntegrationTest(BaseIntegrationTest):
def setUp(self):
super(BaseAPIIntegrationTest, self).setUp()
- self.client = docker.APIClient(timeout=60, **kwargs_from_env())
+ self.client = docker.APIClient(
+ version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
+ )
+
+ def tearDown(self):
+ super(BaseAPIIntegrationTest, self).tearDown()
+ self.client.close()
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
@@ -77,7 +93,7 @@ def run_container(self, *args, **kwargs):
return container
- def create_and_start(self, image='busybox', command='top', **kwargs):
+ def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
diff --git a/tests/integration/client_test.py b/tests/integration/client_test.py
index dfced9b66f..20e8cd55e7 100644
--- a/tests/integration/client_test.py
+++ b/tests/integration/client_test.py
@@ -2,19 +2,21 @@
import docker
+from .base import TEST_API_VERSION
+
class ClientTest(unittest.TestCase):
def test_info(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
info = client.info()
assert 'ID' in info
assert 'Name' in info
def test_ping(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
assert client.ping() is True
def test_version(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
assert 'Version' in client.version()
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 7217fe07a3..4e8d26831d 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -13,7 +13,7 @@
@pytest.fixture(autouse=True, scope='session')
def setup_test_session():
warnings.simplefilter('error')
- c = docker.APIClient(**kwargs_from_env())
+ c = docker.APIClient(version='auto', **kwargs_from_env())
try:
c.inspect_image(BUSYBOX)
except docker.errors.NotFound:
diff --git a/tests/integration/models_containers_test.py b/tests/integration/models_containers_test.py
index d8b4c62c35..4f1e6a1fe9 100644
--- a/tests/integration/models_containers_test.py
+++ b/tests/integration/models_containers_test.py
@@ -1,25 +1,26 @@
import docker
-from .base import BaseIntegrationTest
+import tempfile
+from .base import BaseIntegrationTest, TEST_API_VERSION
class ContainerCollectionTest(BaseIntegrationTest):
def test_run(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
self.assertEqual(
client.containers.run("alpine", "echo hello world", remove=True),
b'hello world\n'
)
def test_run_detach(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert container.attrs['Config']['Image'] == "alpine"
assert container.attrs['Config']['Cmd'] == ['sleep', '300']
def test_run_with_error(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.ContainerError) as cm:
client.containers.run("alpine", "cat /test", remove=True)
assert cm.exception.exit_status == 1
@@ -28,19 +29,55 @@ def test_run_with_error(self):
assert "No such file or directory" in str(cm.exception)
def test_run_with_image_that_does_not_exist(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.ImageNotFound):
client.containers.run("dockerpytest_does_not_exist")
+ def test_run_with_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ path = tempfile.mkdtemp()
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["%s:/insidecontainer" % path],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["%s:/insidecontainer" % path]
+ )
+ self.assertEqual(out, b'hello\n')
+
+ def test_run_with_named_volume(self):
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.volumes.create(name="somevolume")
+
+ container = client.containers.run(
+ "alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
+ volumes=["somevolume:/insidecontainer"],
+ detach=True
+ )
+ self.tmp_containers.append(container.id)
+ container.wait()
+
+ out = client.containers.run(
+ "alpine", "cat /insidecontainer/test",
+ volumes=["somevolume:/insidecontainer"]
+ )
+ self.assertEqual(out, b'hello\n')
+
def test_get(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert client.containers.get(container.id).attrs[
'Config']['Image'] == "alpine"
def test_list(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container_id = client.containers.run(
"alpine", "sleep 300", detach=True).id
self.tmp_containers.append(container_id)
@@ -59,7 +96,7 @@ def test_list(self):
class ContainerTest(BaseIntegrationTest):
def test_commit(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test'",
detach=True
@@ -73,14 +110,14 @@ def test_commit(self):
)
def test_diff(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "touch /test", detach=True)
self.tmp_containers.append(container.id)
container.wait()
assert container.diff() == [{'Path': '/test', 'Kind': 1}]
def test_exec_run(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
)
@@ -88,7 +125,7 @@ def test_exec_run(self):
assert container.exec_run("cat /test") == b"hello\n"
def test_kill(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
while container.status != 'running':
@@ -99,7 +136,7 @@ def test_kill(self):
assert container.status == 'exited'
def test_logs(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello world",
detach=True)
self.tmp_containers.append(container.id)
@@ -107,7 +144,7 @@ def test_logs(self):
assert container.logs() == b"hello world\n"
def test_pause(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
container.pause()
@@ -118,7 +155,7 @@ def test_pause(self):
assert container.status == "running"
def test_remove(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", detach=True)
self.tmp_containers.append(container.id)
assert container.id in [c.id for c in client.containers.list(all=True)]
@@ -128,7 +165,7 @@ def test_remove(self):
assert container.id not in [c.id for c in containers]
def test_rename(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", name="test1",
detach=True)
self.tmp_containers.append(container.id)
@@ -138,7 +175,7 @@ def test_rename(self):
assert container.name == "test2"
def test_restart(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
@@ -148,7 +185,7 @@ def test_restart(self):
assert first_started_at != second_started_at
def test_start(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.create("alpine", "sleep 50", detach=True)
self.tmp_containers.append(container.id)
assert container.status == "created"
@@ -157,7 +194,7 @@ def test_start(self):
assert container.status == "running"
def test_stats(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
stats = container.stats(stream=False)
@@ -166,7 +203,7 @@ def test_stats(self):
assert key in stats
def test_stop(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "top", detach=True)
self.tmp_containers.append(container.id)
assert container.status in ("running", "created")
@@ -175,7 +212,7 @@ def test_stop(self):
assert container.status == "exited"
def test_top(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True)
self.tmp_containers.append(container.id)
top = container.top()
@@ -183,7 +220,7 @@ def test_top(self):
assert 'sleep 60' in top['Processes'][0]
def test_update(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True,
cpu_shares=2)
self.tmp_containers.append(container.id)
@@ -193,7 +230,7 @@ def test_update(self):
assert container.attrs['HostConfig']['CpuShares'] == 3
def test_wait(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sh -c 'exit 0'",
detach=True)
self.tmp_containers.append(container.id)
diff --git a/tests/integration/models_images_test.py b/tests/integration/models_images_test.py
index 876ec292b6..4f8bb26cd5 100644
--- a/tests/integration/models_images_test.py
+++ b/tests/integration/models_images_test.py
@@ -3,13 +3,13 @@
import docker
import pytest
-from .base import BaseIntegrationTest
+from .base import BaseIntegrationTest, TEST_API_VERSION
class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
image = client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
@@ -19,7 +19,7 @@ def test_build(self):
@pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
def test_build_with_error(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
@@ -29,18 +29,18 @@ def test_build_with_error(self):
"NOTADOCKERFILECOMMAND")
def test_list(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert image.id in get_ids(client.images.list())
def test_list_with_repository(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert image.id in get_ids(client.images.list('alpine'))
assert image.id in get_ids(client.images.list('alpine:latest'))
def test_pull(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert 'alpine:latest' in image.attrs['RepoTags']
@@ -52,7 +52,7 @@ def test_tag_and_remove(self):
tag = 'some-tag'
identifier = '{}:{}'.format(repo, tag)
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
image.tag(repo, tag)
diff --git a/tests/integration/models_networks_test.py b/tests/integration/models_networks_test.py
index 771ee7d346..105dcc594a 100644
--- a/tests/integration/models_networks_test.py
+++ b/tests/integration/models_networks_test.py
@@ -1,12 +1,12 @@
import docker
from .. import helpers
-from .base import BaseIntegrationTest
+from .base import BaseIntegrationTest, TEST_API_VERSION
class ImageCollectionTest(BaseIntegrationTest):
def test_create(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name, labels={'foo': 'bar'})
self.tmp_networks.append(network.id)
@@ -14,7 +14,7 @@ def test_create(self):
assert network.attrs['Labels']['foo'] == "bar"
def test_get(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network_id = client.networks.create(name).id
self.tmp_networks.append(network_id)
@@ -22,7 +22,7 @@ def test_get(self):
assert network.name == name
def test_list_remove(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name)
self.tmp_networks.append(network.id)
@@ -50,7 +50,7 @@ def test_list_remove(self):
class ImageTest(BaseIntegrationTest):
def test_connect_disconnect(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
network = client.networks.create(helpers.random_name())
self.tmp_networks.append(network.id)
container = client.containers.create("alpine", "sleep 300")
diff --git a/tests/integration/models_nodes_test.py b/tests/integration/models_nodes_test.py
index 9fd16593ac..5823e6b1a3 100644
--- a/tests/integration/models_nodes_test.py
+++ b/tests/integration/models_nodes_test.py
@@ -3,18 +3,19 @@
import docker
from .. import helpers
+from .base import TEST_API_VERSION
class NodesTest(unittest.TestCase):
def setUp(self):
- helpers.force_leave_swarm(docker.from_env())
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def tearDown(self):
- helpers.force_leave_swarm(docker.from_env())
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_list_get_update(self):
- client = docker.from_env()
- client.swarm.init(listen_addr=helpers.swarm_listen_addr())
+ client = docker.from_env(version=TEST_API_VERSION)
+ client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
nodes = client.nodes.list()
assert len(nodes) == 1
assert nodes[0].attrs['Spec']['Role'] == 'manager'
diff --git a/tests/integration/models_resources_test.py b/tests/integration/models_resources_test.py
index b8eba81c6e..4aafe0cc74 100644
--- a/tests/integration/models_resources_test.py
+++ b/tests/integration/models_resources_test.py
@@ -1,11 +1,11 @@
import docker
-from .base import BaseIntegrationTest
+from .base import BaseIntegrationTest, TEST_API_VERSION
class ModelTest(BaseIntegrationTest):
def test_reload(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
diff --git a/tests/integration/models_services_test.py b/tests/integration/models_services_test.py
index a795df9841..9b5676d694 100644
--- a/tests/integration/models_services_test.py
+++ b/tests/integration/models_services_test.py
@@ -4,21 +4,22 @@
import pytest
from .. import helpers
+from .base import TEST_API_VERSION
class ServiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
helpers.force_leave_swarm(client)
- client.swarm.init(listen_addr=helpers.swarm_listen_addr())
+ client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
@classmethod
def tearDownClass(cls):
- helpers.force_leave_swarm(docker.from_env())
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_create(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
# create arguments
@@ -36,7 +37,7 @@ def test_create(self):
assert container_spec['Labels'] == {'container': 'label'}
def test_get(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
name=name,
@@ -47,7 +48,7 @@ def test_get(self):
assert service.name == name
def test_list_remove(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
name=helpers.random_name(),
image="alpine",
@@ -58,7 +59,7 @@ def test_list_remove(self):
assert service not in client.services.list()
def test_tasks(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
service1 = client.services.create(
name=helpers.random_name(),
image="alpine",
@@ -83,7 +84,7 @@ def test_tasks(self):
@pytest.mark.skip(reason="Makes Swarm unstable?")
def test_update(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
diff --git a/tests/integration/models_swarm_test.py b/tests/integration/models_swarm_test.py
index 4f177f1005..e45ff3cb72 100644
--- a/tests/integration/models_swarm_test.py
+++ b/tests/integration/models_swarm_test.py
@@ -3,19 +3,21 @@
import docker
from .. import helpers
+from .base import TEST_API_VERSION
class SwarmTest(unittest.TestCase):
def setUp(self):
- helpers.force_leave_swarm(docker.from_env())
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def tearDown(self):
- helpers.force_leave_swarm(docker.from_env())
+ helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_init_update_leave(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
client.swarm.init(
- snapshot_interval=5000, listen_addr=helpers.swarm_listen_addr()
+ advertise_addr='eth0', snapshot_interval=5000,
+ listen_addr=helpers.swarm_listen_addr()
)
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
client.swarm.update(snapshot_interval=10000)
diff --git a/tests/integration/models_volumes_test.py b/tests/integration/models_volumes_test.py
index 094e68fadb..47b4a4550f 100644
--- a/tests/integration/models_volumes_test.py
+++ b/tests/integration/models_volumes_test.py
@@ -1,10 +1,10 @@
import docker
-from .base import BaseIntegrationTest
+from .base import BaseIntegrationTest, TEST_API_VERSION
class VolumesTest(BaseIntegrationTest):
def test_create_get(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
volume = client.volumes.create(
'dockerpytest_1',
driver='local',
@@ -19,7 +19,7 @@ def test_create_get(self):
assert volume.name == 'dockerpytest_1'
def test_list_remove(self):
- client = docker.from_env()
+ client = docker.from_env(version=TEST_API_VERSION)
volume = client.volumes.create('dockerpytest_1')
self.tmp_volumes.append(volume.id)
assert volume in client.volumes.list()
diff --git a/tests/integration/testdata/dummy-plugin/config.json b/tests/integration/testdata/dummy-plugin/config.json
new file mode 100644
index 0000000000..53b4e7aa98
--- /dev/null
+++ b/tests/integration/testdata/dummy-plugin/config.json
@@ -0,0 +1,19 @@
+{
+ "description": "Dummy test plugin for docker python SDK",
+ "documentation": "https://github.com/docker/docker-py",
+ "entrypoint": ["/dummy"],
+ "network": {
+ "type": "host"
+ },
+ "interface" : {
+ "types": ["docker.volumedriver/1.0"],
+ "socket": "dummy.sock"
+ },
+ "env": [
+ {
+ "name":"DEBUG",
+ "settable":["value"],
+ "value":"0"
+ }
+ ]
+}
diff --git a/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt b/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/unit/api_container_test.py b/tests/unit/api_container_test.py
index abf3613885..51d6678151 100644
--- a/tests/unit/api_container_test.py
+++ b/tests/unit/api_container_test.py
@@ -45,7 +45,7 @@ def test_start_container_none(self):
self.assertEqual(
str(excinfo.value),
- 'image or container param is undefined',
+ 'Resource ID was not provided',
)
with pytest.raises(ValueError) as excinfo:
@@ -53,7 +53,7 @@ def test_start_container_none(self):
self.assertEqual(
str(excinfo.value),
- 'image or container param is undefined',
+ 'Resource ID was not provided',
)
def test_start_container_regression_573(self):
@@ -1559,7 +1559,7 @@ def test_inspect_container_undefined_id(self):
self.client.inspect_container(arg)
self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
+ excinfo.value.args[0], 'Resource ID was not provided'
)
def test_container_stats(self):
diff --git a/tests/unit/api_image_test.py b/tests/unit/api_image_test.py
index fbfb146bb7..36b2a46833 100644
--- a/tests/unit/api_image_test.py
+++ b/tests/unit/api_image_test.py
@@ -204,7 +204,7 @@ def test_inspect_image_undefined_id(self):
self.client.inspect_image(arg)
self.assertEqual(
- excinfo.value.args[0], 'image or container param is undefined'
+ excinfo.value.args[0], 'Resource ID was not provided'
)
def test_insert_image(self):
diff --git a/tests/unit/api_volume_test.py b/tests/unit/api_volume_test.py
index cb72cb2580..fc2a556d29 100644
--- a/tests/unit/api_volume_test.py
+++ b/tests/unit/api_volume_test.py
@@ -89,6 +89,16 @@ def test_create_volume_invalid_opts_type(self):
'perfectcherryblossom', driver_opts=''
)
+ @requires_api_version('1.24')
+ def test_create_volume_with_no_specified_name(self):
+ result = self.client.create_volume(name=None)
+ self.assertIn('Name', result)
+ self.assertNotEqual(result['Name'], None)
+ self.assertIn('Driver', result)
+ self.assertEqual(result['Driver'], 'local')
+ self.assertIn('Scope', result)
+ self.assertEqual(result['Scope'], 'local')
+
@requires_api_version('1.21')
def test_inspect_volume(self):
name = 'perfectcherryblossom'
diff --git a/tests/unit/fake_api.py b/tests/unit/fake_api.py
index cf3f7d7dd1..2d0a0b4541 100644
--- a/tests/unit/fake_api.py
+++ b/tests/unit/fake_api.py
@@ -389,11 +389,13 @@ def get_fake_volume_list():
{
'Name': 'perfectcherryblossom',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom'
+ 'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
+ 'Scope': 'local'
}, {
'Name': 'subterraneananimism',
'Driver': 'local',
- 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism'
+ 'Mountpoint': '/var/lib/docker/volumes/subterraneananimism',
+ 'Scope': 'local'
}
]
}
@@ -408,7 +410,8 @@ def get_fake_volume():
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Labels': {
'com.example.some-label': 'some-value'
- }
+ },
+ 'Scope': 'local'
}
return status_code, response
diff --git a/tests/unit/models_containers_test.py b/tests/unit/models_containers_test.py
index c3086c629a..ae1bd12aae 100644
--- a/tests/unit/models_containers_test.py
+++ b/tests/unit/models_containers_test.py
@@ -100,6 +100,9 @@ def test_create_container_args(self):
volumes=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro',
],
volumes_from=['container'],
working_dir='/code'
@@ -116,6 +119,9 @@ def test_create_container_args(self):
'Binds': [
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
+ 'volumename:/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath:ro'
],
'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
@@ -181,7 +187,13 @@ def test_create_container_args(self):
tty=True,
user='bob',
volume_driver='some_driver',
- volumes=['/home/user1/', '/var/www'],
+ volumes=[
+ '/mnt/vol2',
+ '/mnt/vol1',
+ '/mnt/vol3',
+ '/volumewithnohostpath',
+ '/anothervolumewithnohostpath'
+ ],
working_dir='/code'
)
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index cf00616d3d..854d0ef2cd 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -5,6 +5,7 @@
import os
import os.path
import shutil
+import socket
import sys
import tarfile
import tempfile
@@ -22,10 +23,9 @@
decode_json_header, tar, split_command, parse_devices, update_headers,
)
+from docker.utils.build import should_check_directory
from docker.utils.ports import build_port_bindings, split_port
-from docker.utils.utils import (
- format_environment, should_check_directory
-)
+from docker.utils.utils import format_environment
from ..helpers import make_tree
@@ -810,6 +810,17 @@ def test_subdirectory_win32_pathsep(self):
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
+ def test_double_wildcard(self):
+ assert self.exclude(['**/a.py']) == convert_paths(
+ self.all_paths - set(
+ ['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
+ )
+ )
+
+ assert self.exclude(['foo/**/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
@@ -894,6 +905,20 @@ def test_tar_with_directory_symlinks(self):
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
+ def test_tar_socket_file(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ sock = socket.socket(socket.AF_UNIX)
+ self.addCleanup(sock.close)
+ sock.bind(os.path.join(base, 'test.sock'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ self.assertEqual(
+ sorted(tar_data.getnames()), ['bar', 'foo']
+ )
+
class ShouldCheckDirectoryTest(unittest.TestCase):
exclude_patterns = [